[RISCV] Rename some assembler mnemonics and intrinsic functions for RVV 1.0.

Rename the vpopc/vmandnot/vmornot assembler mnemonics to vcpop/vmandn/vmorn, updating the corresponding intrinsics and tests to match.

Reviewed By: frasercrmck, jrtc27, craig.topper

Differential Revision: https://reviews.llvm.org/D111062
Author: Zakk Chen
Date:   2021-11-04 09:22:34 -07:00
Commit: 0649dfebba (parent 4aa9b39824)

46 changed files with 1390 additions and 1373 deletions
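
For reference, a minimal sketch of what the rename looks like at the C intrinsic level, using the overloaded names exercised by the tests below (the helper name is illustrative; assumes a compiler with RVV intrinsic support):

#include <riscv_vector.h>
#include <stddef.h>

// Old spelling -> new spelling
// vpopc.m      -> vcpop.m    (builtin vpopc    -> vcpop)
// vmandnot.mm  -> vmandn.mm  (builtin vmandnot -> vmandn)
// vmornot.mm   -> vmorn.mm   (builtin vmornot  -> vmorn)
unsigned long count_and_not(vbool8_t a, vbool8_t b, size_t vl) {
  vbool8_t t = vmandn(a, b, vl); // t = a & ~b; previously spelled vmandnot
  return vcpop(t, vl);           // count of set mask bits; previously vpopc
}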

View File

@@ -1916,11 +1916,11 @@ defm vfwredosum : RVVFloatingWidenReductionBuiltin;
// 16.1. Vector Mask-Register Logical Instructions
def vmand : RVVMaskBinBuiltin;
def vmnand : RVVMaskBinBuiltin;
def vmandnot : RVVMaskBinBuiltin;
def vmandn : RVVMaskBinBuiltin;
def vmxor : RVVMaskBinBuiltin;
def vmor : RVVMaskBinBuiltin;
def vmnor : RVVMaskBinBuiltin;
def vmornot : RVVMaskBinBuiltin;
def vmorn : RVVMaskBinBuiltin;
def vmxnor : RVVMaskBinBuiltin;
// pseudoinstructions
def vmclr : RVVMaskNullaryBuiltin;
@@ -1929,8 +1929,8 @@ defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;
let HasPolicy = false in {
// 16.2. Vector mask population count vpopc
def vpopc : RVVMaskOp0Builtin<"um">;
// 16.2. Vector count population in mask vcpop.m
def vcpop : RVVMaskOp0Builtin<"um">;
// 16.3. vfirst find-first-set mask bit
def vfirst : RVVMaskOp0Builtin<"lm">;

View File

@@ -0,0 +1,131 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: @test_vcpop_m_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
return vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
return vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
return vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
return vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
return vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
return vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
return vcpop(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
return vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
return vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
return vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
return vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
return vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
return vcpop(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
return vcpop(mask, op1, vl);
}
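
A note on the masked forms above: per the RVV spec, the masked vcpop.m counts only the bits of op1 whose corresponding mask bit is set, so the two overloads relate as in this sketch (helper names illustrative):

#include <riscv_vector.h>
#include <stddef.h>

// Unmasked: counts every set bit among the first vl elements of op1.
unsigned long count_all(vbool8_t op1, size_t vl) {
  return vcpop(op1, vl);
}

// Masked: equivalent to vcpop(vmand(mask, op1, vl), vl).
unsigned long count_active(vbool8_t mask, vbool8_t op1, size_t vl) {
  return vcpop(mask, op1, vl);
}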

View File

@@ -67,65 +67,65 @@ vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmand(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
// CHECK-RV64-LABEL: @test_vmandn_mm_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
// CHECK-RV64-LABEL: @test_vmandn_mm_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
// CHECK-RV64-LABEL: @test_vmandn_mm_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
// CHECK-RV64-LABEL: @test_vmandn_mm_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
// CHECK-RV64-LABEL: @test_vmandn_mm_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
// CHECK-RV64-LABEL: @test_vmandn_mm_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmandn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
// CHECK-RV64-LABEL: @test_vmandn_mm_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmandn(op1, op2, vl);
}

View File

@@ -67,65 +67,65 @@ vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmor(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
// CHECK-RV64-LABEL: @test_vmorn_mm_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmornot(op1, op2, vl);
vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
// CHECK-RV64-LABEL: @test_vmorn_mm_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmornot(op1, op2, vl);
vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
// CHECK-RV64-LABEL: @test_vmorn_mm_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmornot(op1, op2, vl);
vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
// CHECK-RV64-LABEL: @test_vmorn_mm_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmornot(op1, op2, vl);
vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
// CHECK-RV64-LABEL: @test_vmorn_mm_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmornot(op1, op2, vl);
vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
// CHECK-RV64-LABEL: @test_vmorn_mm_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmornot(op1, op2, vl);
vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmorn(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
// CHECK-RV64-LABEL: @test_vmorn_mm_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmornot(op1, op2, vl);
vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmorn(op1, op2, vl);
}

View File

@@ -1,131 +0,0 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: @test_vpopc_m_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
return vpopc(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
return vpopc(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
return vpopc(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
return vpopc(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
return vpopc(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
return vpopc(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
return vpopc(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
return vpopc(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
return vpopc(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
return vpopc(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
return vpopc(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
return vpopc(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
return vpopc(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
return vpopc(mask, op1, vl);
}

View File

@@ -0,0 +1,131 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: @test_vcpop_m_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
return vcpop_m_b1(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
return vcpop_m_b2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
return vcpop_m_b4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
return vcpop_m_b8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
return vcpop_m_b16(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
return vcpop_m_b32(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
return vcpop_m_b64(op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
return vcpop_m_b1_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
return vcpop_m_b2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
return vcpop_m_b4_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
return vcpop_m_b8_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
return vcpop_m_b16_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
return vcpop_m_b32_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vcpop_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
return vcpop_m_b64_m(mask, op1, vl);
}
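
This file exercises the explicitly-typed names, while the earlier test used the overloaded ones; as the CHECK lines show, both spellings lower to the same llvm.riscv.vcpop intrinsic. A minimal sketch of the two equivalent calls (helper name illustrative):

#include <riscv_vector.h>
#include <stddef.h>

unsigned long count_both_ways(vbool8_t op1, size_t vl) {
  unsigned long a = vcpop(op1, vl);      // overloaded form
  unsigned long b = vcpop_m_b8(op1, vl); // explicitly-typed form
  return a + b;                          // both calls emit llvm.riscv.vcpop
}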

View File

@@ -67,65 +67,65 @@ vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmand_mm_b64(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
// CHECK-RV64-LABEL: @test_vmandn_mm_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmandnot_mm_b1(op1, op2, vl);
vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmandn_mm_b1(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
// CHECK-RV64-LABEL: @test_vmandn_mm_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmandnot_mm_b2(op1, op2, vl);
vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmandn_mm_b2(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
// CHECK-RV64-LABEL: @test_vmandn_mm_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmandnot_mm_b4(op1, op2, vl);
vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmandn_mm_b4(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
// CHECK-RV64-LABEL: @test_vmandn_mm_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmandnot_mm_b8(op1, op2, vl);
vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmandn_mm_b8(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
// CHECK-RV64-LABEL: @test_vmandn_mm_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmandnot_mm_b16(op1, op2, vl);
vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmandn_mm_b16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
// CHECK-RV64-LABEL: @test_vmandn_mm_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmandnot_mm_b32(op1, op2, vl);
vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmandn_mm_b32(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
// CHECK-RV64-LABEL: @test_vmandn_mm_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmandnot_mm_b64(op1, op2, vl);
vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmandn_mm_b64(op1, op2, vl);
}

View File

@@ -67,65 +67,65 @@ vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmor_mm_b64(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
// CHECK-RV64-LABEL: @test_vmorn_mm_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmornot_mm_b1(op1, op2, vl);
vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmorn_mm_b1(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
// CHECK-RV64-LABEL: @test_vmorn_mm_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmornot_mm_b2(op1, op2, vl);
vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmorn_mm_b2(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
// CHECK-RV64-LABEL: @test_vmorn_mm_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmornot_mm_b4(op1, op2, vl);
vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmorn_mm_b4(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
// CHECK-RV64-LABEL: @test_vmorn_mm_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmornot_mm_b8(op1, op2, vl);
vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmorn_mm_b8(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
// CHECK-RV64-LABEL: @test_vmorn_mm_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmornot_mm_b16(op1, op2, vl);
vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmorn_mm_b16(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
// CHECK-RV64-LABEL: @test_vmorn_mm_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmornot_mm_b32(op1, op2, vl);
vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmorn_mm_b32(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
// CHECK-RV64-LABEL: @test_vmorn_mm_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmornot_mm_b64(op1, op2, vl);
vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmorn_mm_b64(op1, op2, vl);
}

View File

@@ -1,131 +0,0 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: @test_vpopc_m_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
return vpopc_m_b1(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
return vpopc_m_b2(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
return vpopc_m_b4(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
return vpopc_m_b8(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
return vpopc_m_b16(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
return vpopc_m_b32(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
return vpopc_m_b64(op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
return vpopc_m_b1_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
return vpopc_m_b2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
return vpopc_m_b4_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
return vpopc_m_b8_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
return vpopc_m_b16_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
return vpopc_m_b32_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
return vpopc_m_b64_m(mask, op1, vl);
}

View File

@@ -1194,16 +1194,16 @@ let TargetPrefix = "riscv" in {
def int_riscv_vmand: RISCVBinaryAAANoMask;
def int_riscv_vmnand: RISCVBinaryAAANoMask;
def int_riscv_vmandnot: RISCVBinaryAAANoMask;
def int_riscv_vmandn: RISCVBinaryAAANoMask;
def int_riscv_vmxor: RISCVBinaryAAANoMask;
def int_riscv_vmor: RISCVBinaryAAANoMask;
def int_riscv_vmnor: RISCVBinaryAAANoMask;
def int_riscv_vmornot: RISCVBinaryAAANoMask;
def int_riscv_vmorn: RISCVBinaryAAANoMask;
def int_riscv_vmxnor: RISCVBinaryAAANoMask;
def int_riscv_vmclr : RISCVNullaryIntrinsic;
def int_riscv_vmset : RISCVNullaryIntrinsic;
defm vpopc : RISCVMaskUnarySOut;
defm vcpop : RISCVMaskUnarySOut;
defm vfirst : RISCVMaskUnarySOut;
defm vmsbf : RISCVMaskUnaryMOut;
defm vmsof : RISCVMaskUnaryMOut;

View File

@@ -2377,7 +2377,7 @@ void RISCVAsmParser::emitVMSGE(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
// masked va >= x, vd == v0
//
// pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
// expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
// expansion: vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
assert(Inst.getOperand(0).getReg() == RISCV::V0 &&
"The destination register should be V0.");
assert(Inst.getOperand(1).getReg() != RISCV::V0 &&
@@ -2387,7 +2387,7 @@ void RISCVAsmParser::emitVMSGE(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
.addOperand(Inst.getOperand(2))
.addOperand(Inst.getOperand(3))
.addOperand(Inst.getOperand(4)));
emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
.addOperand(Inst.getOperand(0))
.addOperand(Inst.getOperand(0))
.addOperand(Inst.getOperand(1)));
@@ -2395,7 +2395,7 @@ void RISCVAsmParser::emitVMSGE(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
// masked va >= x, any vd
//
// pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
// expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vt, v0, vt; vmandnot.mm vd,
// expansion: vmslt{u}.vx vt, va, x; vmandn.mm vt, v0, vt; vmandn.mm vd,
// vd, v0; vmor.mm vd, vt, vd
assert(Inst.getOperand(1).getReg() != RISCV::V0 &&
"The temporary vector register should not be V0.");
@@ -2404,11 +2404,11 @@ void RISCVAsmParser::emitVMSGE(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
.addOperand(Inst.getOperand(2))
.addOperand(Inst.getOperand(3))
.addReg(RISCV::NoRegister));
emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
.addOperand(Inst.getOperand(1))
.addReg(RISCV::V0)
.addOperand(Inst.getOperand(1)));
emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
.addOperand(Inst.getOperand(0))
.addOperand(Inst.getOperand(0))
.addReg(RISCV::V0));
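
A sketch of the mask algebra behind the expansion above, written with the renamed C intrinsics (function name illustrative; assumes the overloaded vmslt comparison intrinsic): for vd == v0, the masked vmsge computes v0 & ~(va < x), which equals v0 & (va >= x).

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

vbool8_t vmsge_masked_sketch(vbool8_t v0, vint8m1_t va, int8_t x, size_t vl) {
  vbool8_t lt = vmslt(va, x, vl); // vmslt{u}.vx vt, va, x
  return vmandn(v0, lt, vl);      // vmandn.mm vd, vd, vt (was vmandnot.mm)
}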

View File

@@ -808,7 +808,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
}
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
MVT Src1VT = Src1.getSimpleValueType();
unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
@@ -861,31 +861,31 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
llvm_unreachable("Unexpected LMUL!");
case RISCVII::VLMUL::LMUL_F8:
VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF8;
break;
case RISCVII::VLMUL::LMUL_F4:
VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF4;
break;
case RISCVII::VLMUL::LMUL_F2:
VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF2;
break;
case RISCVII::VLMUL::LMUL_1:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
VMANDNOpcode = RISCV::PseudoVMANDN_MM_M1;
break;
case RISCVII::VLMUL::LMUL_2:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
VMANDNOpcode = RISCV::PseudoVMANDN_MM_M2;
break;
case RISCVII::VLMUL::LMUL_4:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
VMANDNOpcode = RISCV::PseudoVMANDN_MM_M4;
break;
case RISCVII::VLMUL::LMUL_8:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
VMANDNOpcode = RISCV::PseudoVMANDN_MM_M8;
break;
}
SDValue SEW = CurDAG->getTargetConstant(
@@ -896,13 +896,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
// If the MaskedOff value and the Mask are the same value use
// vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
// vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
// This avoids needing to copy v0 to vd before starting the next sequence.
if (Mask == MaskedOff) {
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
{Mask, Cmp, VL, MaskSEW}));
return;
}

View File

@@ -4189,26 +4189,26 @@ SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
llvm_unreachable("Unhandled reduction");
case ISD::VECREDUCE_AND:
case ISD::VP_REDUCE_AND: {
// vpopc ~x == 0
// vcpop ~x == 0
SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
CC = ISD::SETEQ;
BaseOpc = ISD::AND;
break;
}
case ISD::VECREDUCE_OR:
case ISD::VP_REDUCE_OR:
// vpopc x != 0
Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
// vcpop x != 0
Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
CC = ISD::SETNE;
BaseOpc = ISD::OR;
break;
case ISD::VECREDUCE_XOR:
case ISD::VP_REDUCE_XOR: {
// ((vpopc x) & 1) != 0
// ((vcpop x) & 1) != 0
SDValue One = DAG.getConstant(1, DL, XLenVT);
Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
CC = ISD::SETNE;
BaseOpc = ISD::XOR;
@@ -4223,7 +4223,7 @@ SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
// Now include the start value in the operation.
// Note that we must return the start value when no elements are operated
// upon. The vpopc instructions we've emitted in each case above will return
// upon. The vcpop instructions we've emitted in each case above will return
// 0 for an inactive vector, and so we've already received the neutral value:
// AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
// can simply include the start value.
@@ -9273,7 +9273,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(VRGATHEREI16_VV_VL)
NODE_NAME_CASE(VSEXT_VL)
NODE_NAME_CASE(VZEXT_VL)
NODE_NAME_CASE(VPOPC_VL)
NODE_NAME_CASE(VCPOP_VL)
NODE_NAME_CASE(VLE_VL)
NODE_NAME_CASE(VSE_VL)
NODE_NAME_CASE(READ_CSR)
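
The reduction lowering above rests on three vcpop identities, stated in the comments in this hunk; a C-level sketch with the renamed intrinsic (helper names illustrative; ~x is formed with vmnand because vmnot.m is an alias for vmnand vd, vs, vs):

#include <riscv_vector.h>
#include <stdbool.h>
#include <stddef.h>

// VECREDUCE_OR:  any bit set  <=> vcpop(x) != 0
bool reduce_or(vbool8_t x, size_t vl)  { return vcpop(x, vl) != 0; }

// VECREDUCE_AND: all bits set <=> vcpop(~x) == 0
bool reduce_and(vbool8_t x, size_t vl) {
  return vcpop(vmnand(x, x, vl), vl) == 0;
}

// VECREDUCE_XOR: odd parity   <=> (vcpop(x) & 1) != 0
bool reduce_xor(vbool8_t x, size_t vl) { return (vcpop(x, vl) & 1) != 0; }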

View File

@@ -263,8 +263,8 @@ enum NodeType : unsigned {
VSEXT_VL,
VZEXT_VL,
// vpopc.m with additional mask and VL operands.
VPOPC_VL,
// vcpop.m with additional mask and VL operands.
VCPOP_VL,
// Reads value of CSR.
// The first operand is a chain pointer. The second specifies address of the

View File

@@ -1361,11 +1361,11 @@ let Predicates = [HasStdExtV] in {
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDNOT_M : VMALU_MV_Mask<"vmandnot", 0b011000, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORNOT_M : VMALU_MV_Mask<"vmornot", 0b011100, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
}
@@ -1378,13 +1378,18 @@ def : InstAlias<"vmset.m $vd",
def : InstAlias<"vmnot.m $vd, $vs",
(VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
(VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
(VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
RVVConstraint = NoConstraint in {
// Vector mask population count vpopc
def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
// Vector mask population count vcpop
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
(ins VR:$vs2, VMaskOp:$vm),
"vpopc.m", "$vd, $vs2$vm">,
"vcpop.m", "$vd, $vs2$vm">,
Sched<[WriteVMPopV, ReadVMPopV, ReadVMask]>;
// vfirst find-first-set mask bit
@@ -1395,6 +1400,9 @@ def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
def : InstAlias<"vpopc.m $vd, $vs2$vm",
(VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;
let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
// vmsbf.m set-before-first mask bit

View File

@@ -4007,11 +4007,11 @@ defm PseudoVFWREDOSUM : VPseudoReductionV_VS;
defm PseudoVMAND: VPseudoBinaryM_MM;
defm PseudoVMNAND: VPseudoBinaryM_MM;
defm PseudoVMANDNOT: VPseudoBinaryM_MM;
defm PseudoVMANDN: VPseudoBinaryM_MM;
defm PseudoVMXOR: VPseudoBinaryM_MM;
defm PseudoVMOR: VPseudoBinaryM_MM;
defm PseudoVMNOR: VPseudoBinaryM_MM;
defm PseudoVMORNOT: VPseudoBinaryM_MM;
defm PseudoVMORN: VPseudoBinaryM_MM;
defm PseudoVMXNOR: VPseudoBinaryM_MM;
// Pseudo instructions
@@ -4019,10 +4019,10 @@ defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
// 16.2. Vector mask population count vcpop
//===----------------------------------------------------------------------===//
defm PseudoVPOPC: VPseudoUnaryS_M;
defm PseudoVCPOP: VPseudoUnaryS_M;
//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
@@ -4676,11 +4676,11 @@ let Predicates = [HasVInstructions] in {
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
defm : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">;
defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
defm : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
// pseudo instructions
@@ -4688,9 +4688,9 @@ defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
// 16.2. Vector count population in mask vcpop.m
//===----------------------------------------------------------------------===//
defm : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">;
defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;
//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit

View File

@@ -561,10 +561,10 @@ foreach mti = AllMasks in {
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
(!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
(!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
(!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
(!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
// Handle rvv_vnot the same as the vmnot.m pseudoinstruction.

View File

@@ -199,7 +199,7 @@ def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;
def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
(riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;
def riscv_vpopc_vl : SDNode<"RISCVISD::VPOPC_VL",
def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
SDTCisVec<1>, SDTCisInt<1>,
SDTCVecEltisVT<2, i1>,
@@ -1233,12 +1233,12 @@ foreach mti = AllMasks in {
def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMANDNOT_MM_" # mti.LMul.MX)
(!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMORNOT_MM_" # mti.LMul.MX)
(!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// XOR is associative so we need 2 patterns for VMXNOR.
def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
@@ -1268,14 +1268,14 @@ foreach mti = AllMasks in {
(!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
// 16.2 Vector Mask Population Count vpopc
def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
// 16.2 Vector count population in mask vcpop.m
def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVPOPC_M_" # mti.BX)
(!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask V0),
def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVPOPC_M_" # mti.BX # "_MASK")
(!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
}

View File

@@ -75,7 +75,7 @@ define void @andnot_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmandnot.mm v8, v9, v8
; CHECK-NEXT: vmandn.mm v8, v9, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i1>, <8 x i1>* %x
@@ -92,7 +92,7 @@ define void @ornot_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmornot.mm v8, v9, v8
; CHECK-NEXT: vmorn.mm v8, v9, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i1>, <16 x i1>* %x


@ -12,7 +12,7 @@ define signext i1 @vpreduce_and_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -29,7 +29,7 @@ define signext i1 @vpreduce_or_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -47,7 +47,7 @@ define signext i1 @vpreduce_xor_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -64,7 +64,7 @@ define signext i1 @vpreduce_and_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -81,7 +81,7 @@ define signext i1 @vpreduce_or_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -99,7 +99,7 @@ define signext i1 @vpreduce_xor_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -116,7 +116,7 @@ define signext i1 @vpreduce_and_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -133,7 +133,7 @@ define signext i1 @vpreduce_or_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -151,7 +151,7 @@ define signext i1 @vpreduce_xor_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -168,7 +168,7 @@ define signext i1 @vpreduce_and_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -185,7 +185,7 @@ define signext i1 @vpreduce_or_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -203,7 +203,7 @@ define signext i1 @vpreduce_xor_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -220,7 +220,7 @@ define signext i1 @vpreduce_and_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -237,7 +237,7 @@ define signext i1 @vpreduce_or_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -255,7 +255,7 @@ define signext i1 @vpreduce_xor_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0


@ -10,7 +10,7 @@ define <1 x i1> @select_v1i1(i1 zeroext %c, <1 x i1> %a, <1 x i1> %b) {
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
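
(For context, a hedged sketch with an illustrative name, not from this patch: the three mask ops above implement the identity select(%cc, %a, %b) = (%a & %cc) | (%b & ~%cc), and the %b & ~%cc term is the one now spelled vmandn.mm.)

define <1 x i1> @select_sketch(<1 x i1> %cc, <1 x i1> %a, <1 x i1> %b) {
  %t = and <1 x i1> %a, %cc                ; taken lanes: vmand.mm
  %notcc = xor <1 x i1> %cc, <i1 true>
  %f = and <1 x i1> %b, %notcc             ; untaken lanes: vmandn.mm
  %r = or <1 x i1> %t, %f                  ; merge: vmor.mm
  ret <1 x i1> %r
}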
@ -26,7 +26,7 @@ define <1 x i1> @selectcc_v1i1(i1 signext %a, i1 signext %b, <1 x i1> %c, <1 x i
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -41,7 +41,7 @@ define <2 x i1> @select_v2i1(i1 zeroext %c, <2 x i1> %a, <2 x i1> %b) {
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -57,7 +57,7 @@ define <2 x i1> @selectcc_v2i1(i1 signext %a, i1 signext %b, <2 x i1> %c, <2 x i
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -72,7 +72,7 @@ define <4 x i1> @select_v4i1(i1 zeroext %c, <4 x i1> %a, <4 x i1> %b) {
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -88,7 +88,7 @@ define <4 x i1> @selectcc_v4i1(i1 signext %a, i1 signext %b, <4 x i1> %c, <4 x i
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -103,7 +103,7 @@ define <8 x i1> @select_v8i1(i1 zeroext %c, <8 x i1> %a, <8 x i1> %b) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -119,7 +119,7 @@ define <8 x i1> @selectcc_v8i1(i1 signext %a, i1 signext %b, <8 x i1> %c, <8 x i
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -134,7 +134,7 @@ define <16 x i1> @select_v16i1(i1 zeroext %c, <16 x i1> %a, <16 x i1> %b) {
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -150,7 +150,7 @@ define <16 x i1> @selectcc_v16i1(i1 signext %a, i1 signext %b, <16 x i1> %c, <16
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret


@ -58,7 +58,7 @@ define signext i1 @vreduce_or_v2i1(<2 x i1> %v) {
; CHECK-LABEL: vreduce_or_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -72,7 +72,7 @@ define signext i1 @vreduce_xor_v2i1(<2 x i1> %v) {
; CHECK-LABEL: vreduce_xor_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -87,7 +87,7 @@ define signext i1 @vreduce_and_v2i1(<2 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
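
(For context, a hedged sketch of the kind of IR these and-reductions come from; the diff omits the function bodies. and-reduce(%v) is true exactly when cpop(~%v) == 0, hence the vmnand + vcpop.m + seqz sequence above.)

declare i1 @llvm.vector.reduce.and.v2i1(<2 x i1>)
define signext i1 @vreduce_and_sketch(<2 x i1> %v) {
  %red = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> %v)
  ret i1 %red
}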
@ -101,7 +101,7 @@ define signext i1 @vreduce_or_v4i1(<4 x i1> %v) {
; CHECK-LABEL: vreduce_or_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -115,7 +115,7 @@ define signext i1 @vreduce_xor_v4i1(<4 x i1> %v) {
; CHECK-LABEL: vreduce_xor_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -130,7 +130,7 @@ define signext i1 @vreduce_and_v4i1(<4 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -144,7 +144,7 @@ define signext i1 @vreduce_or_v8i1(<8 x i1> %v) {
; CHECK-LABEL: vreduce_or_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -158,7 +158,7 @@ define signext i1 @vreduce_xor_v8i1(<8 x i1> %v) {
; CHECK-LABEL: vreduce_xor_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -173,7 +173,7 @@ define signext i1 @vreduce_and_v8i1(<8 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -187,7 +187,7 @@ define signext i1 @vreduce_or_v16i1(<16 x i1> %v) {
; CHECK-LABEL: vreduce_or_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -201,7 +201,7 @@ define signext i1 @vreduce_xor_v16i1(<16 x i1> %v) {
; CHECK-LABEL: vreduce_xor_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -216,7 +216,7 @@ define signext i1 @vreduce_and_v16i1(<16 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -231,7 +231,7 @@ define signext i1 @vreduce_or_v32i1(<32 x i1> %v) {
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vmor.mm v8, v0, v8
; LMULMAX1-NEXT: vpopc.m a0, v8
; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: snez a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
@ -240,7 +240,7 @@ define signext i1 @vreduce_or_v32i1(<32 x i1> %v) {
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX8-NEXT: vpopc.m a0, v0
; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: snez a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
@ -255,7 +255,7 @@ define signext i1 @vreduce_xor_v32i1(<32 x i1> %v) {
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vmxor.mm v8, v0, v8
; LMULMAX1-NEXT: vpopc.m a0, v8
; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: andi a0, a0, 1
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
@ -264,7 +264,7 @@ define signext i1 @vreduce_xor_v32i1(<32 x i1> %v) {
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX8-NEXT: vpopc.m a0, v0
; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: andi a0, a0, 1
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
@ -279,7 +279,7 @@ define signext i1 @vreduce_and_v32i1(<32 x i1> %v) {
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vmnand.mm v8, v0, v8
; LMULMAX1-NEXT: vpopc.m a0, v8
; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: seqz a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
@ -289,7 +289,7 @@ define signext i1 @vreduce_and_v32i1(<32 x i1> %v) {
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
; LMULMAX8-NEXT: vpopc.m a0, v8
; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
@ -306,7 +306,7 @@ define signext i1 @vreduce_or_v64i1(<64 x i1> %v) {
; LMULMAX1-NEXT: vmor.mm v8, v8, v10
; LMULMAX1-NEXT: vmor.mm v9, v0, v9
; LMULMAX1-NEXT: vmor.mm v8, v9, v8
; LMULMAX1-NEXT: vpopc.m a0, v8
; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: snez a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
@ -315,7 +315,7 @@ define signext i1 @vreduce_or_v64i1(<64 x i1> %v) {
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; LMULMAX8-NEXT: vpopc.m a0, v0
; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: snez a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
@ -332,7 +332,7 @@ define signext i1 @vreduce_xor_v64i1(<64 x i1> %v) {
; LMULMAX1-NEXT: vmxor.mm v8, v8, v10
; LMULMAX1-NEXT: vmxor.mm v9, v0, v9
; LMULMAX1-NEXT: vmxor.mm v8, v9, v8
; LMULMAX1-NEXT: vpopc.m a0, v8
; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: andi a0, a0, 1
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
@ -341,7 +341,7 @@ define signext i1 @vreduce_xor_v64i1(<64 x i1> %v) {
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; LMULMAX8-NEXT: vpopc.m a0, v0
; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: andi a0, a0, 1
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
@ -358,7 +358,7 @@ define signext i1 @vreduce_and_v64i1(<64 x i1> %v) {
; LMULMAX1-NEXT: vmand.mm v8, v8, v10
; LMULMAX1-NEXT: vmand.mm v9, v0, v9
; LMULMAX1-NEXT: vmnand.mm v8, v9, v8
; LMULMAX1-NEXT: vpopc.m a0, v8
; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: seqz a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
@ -368,7 +368,7 @@ define signext i1 @vreduce_and_v64i1(<64 x i1> %v) {
; LMULMAX8-NEXT: addi a0, zero, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
; LMULMAX8-NEXT: vpopc.m a0, v8
; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret


@ -225,7 +225,7 @@ define <2 x i1> @vselect_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %cc) {
; CHECK-LABEL: vselect_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -237,7 +237,7 @@ define <4 x i1> @vselect_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %cc) {
; CHECK-LABEL: vselect_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -249,7 +249,7 @@ define <8 x i1> @vselect_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %cc) {
; CHECK-LABEL: vselect_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -261,7 +261,7 @@ define <16 x i1> @vselect_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %cc) {
; CHECK-LABEL: vselect_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -274,7 +274,7 @@ define <32 x i1> @vselect_v32i1(<32 x i1> %a, <32 x i1> %b, <32 x i1> %cc) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -287,7 +287,7 @@ define <64 x i1> @vselect_v64i1(<64 x i1> %a, <64 x i1> %b, <64 x i1> %cc) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 64
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret


@ -10,7 +10,7 @@ define <vscale x 1 x i1> @select_nxv1i1(i1 zeroext %c, <vscale x 1 x i1> %a, <vs
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -26,7 +26,7 @@ define <vscale x 1 x i1> @selectcc_nxv1i1(i1 signext %a, i1 signext %b, <vscale
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -41,7 +41,7 @@ define <vscale x 2 x i1> @select_nxv2i1(i1 zeroext %c, <vscale x 2 x i1> %a, <vs
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -57,7 +57,7 @@ define <vscale x 2 x i1> @selectcc_nxv2i1(i1 signext %a, i1 signext %b, <vscale
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -72,7 +72,7 @@ define <vscale x 4 x i1> @select_nxv4i1(i1 zeroext %c, <vscale x 4 x i1> %a, <vs
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -88,7 +88,7 @@ define <vscale x 4 x i1> @selectcc_nxv4i1(i1 signext %a, i1 signext %b, <vscale
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -103,7 +103,7 @@ define <vscale x 8 x i1> @select_nxv8i1(i1 zeroext %c, <vscale x 8 x i1> %a, <vs
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -119,7 +119,7 @@ define <vscale x 8 x i1> @selectcc_nxv8i1(i1 signext %a, i1 signext %b, <vscale
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -134,7 +134,7 @@ define <vscale x 16 x i1> @select_nxv16i1(i1 zeroext %c, <vscale x 16 x i1> %a,
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -150,7 +150,7 @@ define <vscale x 16 x i1> @selectcc_nxv16i1(i1 signext %a, i1 signext %b, <vscal
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -165,7 +165,7 @@ define <vscale x 32 x i1> @select_nxv32i1(i1 zeroext %c, <vscale x 32 x i1> %a,
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vmsne.vi v9, v12, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -181,7 +181,7 @@ define <vscale x 32 x i1> @selectcc_nxv32i1(i1 signext %a, i1 signext %b, <vscal
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vmsne.vi v9, v12, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -196,7 +196,7 @@ define <vscale x 64 x i1> @select_nxv64i1(i1 zeroext %c, <vscale x 64 x i1> %a,
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vmsne.vi v9, v16, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -212,7 +212,7 @@ define <vscale x 64 x i1> @selectcc_nxv64i1(i1 signext %a, i1 signext %b, <vscal
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vmsne.vi v9, v16, 0
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret


@ -1,39 +1,39 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare i32 @llvm.riscv.vpopc.i32.nxv1i1(
declare i32 @llvm.riscv.vcpop.i32.nxv1i1(
<vscale x 1 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv1i1:
define i32 @intrinsic_vcpop_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.i32.nxv1i1(
%a = call i32 @llvm.riscv.vcpop.i32.nxv1i1(
<vscale x 1 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
declare i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv1i1:
define i32 @intrinsic_vcpop_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
%a = call i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
@ -41,39 +41,39 @@ entry:
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i32.nxv2i1(
declare i32 @llvm.riscv.vcpop.i32.nxv2i1(
<vscale x 2 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv2i1:
define i32 @intrinsic_vcpop_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.i32.nxv2i1(
%a = call i32 @llvm.riscv.vcpop.i32.nxv2i1(
<vscale x 2 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
declare i32 @llvm.riscv.vcpop.mask.i32.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv2i1:
define i32 @intrinsic_vcpop_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
%a = call i32 @llvm.riscv.vcpop.mask.i32.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
@ -81,39 +81,39 @@ entry:
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i32.nxv4i1(
declare i32 @llvm.riscv.vcpop.i32.nxv4i1(
<vscale x 4 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv4i1:
define i32 @intrinsic_vcpop_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.i32.nxv4i1(
%a = call i32 @llvm.riscv.vcpop.i32.nxv4i1(
<vscale x 4 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
declare i32 @llvm.riscv.vcpop.mask.i32.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv4i1:
define i32 @intrinsic_vcpop_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
%a = call i32 @llvm.riscv.vcpop.mask.i32.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
@ -121,39 +121,39 @@ entry:
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i32.nxv8i1(
declare i32 @llvm.riscv.vcpop.i32.nxv8i1(
<vscale x 8 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv8i1:
define i32 @intrinsic_vcpop_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.i32.nxv8i1(
%a = call i32 @llvm.riscv.vcpop.i32.nxv8i1(
<vscale x 8 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
declare i32 @llvm.riscv.vcpop.mask.i32.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv8i1:
define i32 @intrinsic_vcpop_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
%a = call i32 @llvm.riscv.vcpop.mask.i32.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
@ -161,39 +161,39 @@ entry:
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i32.nxv16i1(
declare i32 @llvm.riscv.vcpop.i32.nxv16i1(
<vscale x 16 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv16i1:
define i32 @intrinsic_vcpop_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.i32.nxv16i1(
%a = call i32 @llvm.riscv.vcpop.i32.nxv16i1(
<vscale x 16 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
declare i32 @llvm.riscv.vcpop.mask.i32.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv16i1:
define i32 @intrinsic_vcpop_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
%a = call i32 @llvm.riscv.vcpop.mask.i32.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
@ -201,39 +201,39 @@ entry:
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i32.nxv32i1(
declare i32 @llvm.riscv.vcpop.i32.nxv32i1(
<vscale x 32 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv32i1:
define i32 @intrinsic_vcpop_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.i32.nxv32i1(
%a = call i32 @llvm.riscv.vcpop.i32.nxv32i1(
<vscale x 32 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
declare i32 @llvm.riscv.vcpop.mask.i32.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv32i1:
define i32 @intrinsic_vcpop_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
%a = call i32 @llvm.riscv.vcpop.mask.i32.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
@ -241,39 +241,39 @@ entry:
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i32.nxv64i1(
declare i32 @llvm.riscv.vcpop.i32.nxv64i1(
<vscale x 64 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv64i1:
define i32 @intrinsic_vcpop_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.i32.nxv64i1(
%a = call i32 @llvm.riscv.vcpop.i32.nxv64i1(
<vscale x 64 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
declare i32 @llvm.riscv.vcpop.mask.i32.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv64i1:
define i32 @intrinsic_vcpop_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
%a = call i32 @llvm.riscv.vcpop.mask.i32.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)


@ -1,39 +1,39 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare i64 @llvm.riscv.vpopc.i64.nxv1i1(
declare i64 @llvm.riscv.vcpop.i64.nxv1i1(
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv1i1:
define i64 @intrinsic_vcpop_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.i64.nxv1i1(
%a = call i64 @llvm.riscv.vcpop.i64.nxv1i1(
<vscale x 1 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
declare i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv1i1:
define i64 @intrinsic_vcpop_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
@ -41,39 +41,39 @@ entry:
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv2i1(
declare i64 @llvm.riscv.vcpop.i64.nxv2i1(
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv2i1:
define i64 @intrinsic_vcpop_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.i64.nxv2i1(
%a = call i64 @llvm.riscv.vcpop.i64.nxv2i1(
<vscale x 2 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
declare i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv2i1:
define i64 @intrinsic_vcpop_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
@ -81,39 +81,39 @@ entry:
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv4i1(
declare i64 @llvm.riscv.vcpop.i64.nxv4i1(
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv4i1:
define i64 @intrinsic_vcpop_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.i64.nxv4i1(
%a = call i64 @llvm.riscv.vcpop.i64.nxv4i1(
<vscale x 4 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
declare i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv4i1:
define i64 @intrinsic_vcpop_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
@ -121,39 +121,39 @@ entry:
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv8i1(
declare i64 @llvm.riscv.vcpop.i64.nxv8i1(
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv8i1:
define i64 @intrinsic_vcpop_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.i64.nxv8i1(
%a = call i64 @llvm.riscv.vcpop.i64.nxv8i1(
<vscale x 8 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
declare i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv8i1:
define i64 @intrinsic_vcpop_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
@ -161,39 +161,39 @@ entry:
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv16i1(
declare i64 @llvm.riscv.vcpop.i64.nxv16i1(
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv16i1:
define i64 @intrinsic_vcpop_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.i64.nxv16i1(
%a = call i64 @llvm.riscv.vcpop.i64.nxv16i1(
<vscale x 16 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
declare i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv16i1:
define i64 @intrinsic_vcpop_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
@ -201,39 +201,39 @@ entry:
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv32i1(
declare i64 @llvm.riscv.vcpop.i64.nxv32i1(
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv32i1:
define i64 @intrinsic_vcpop_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.i64.nxv32i1(
%a = call i64 @llvm.riscv.vcpop.i64.nxv32i1(
<vscale x 32 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
declare i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv32i1:
define i64 @intrinsic_vcpop_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
@ -241,39 +241,39 @@ entry:
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv64i1(
declare i64 @llvm.riscv.vcpop.i64.nxv64i1(
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv64i1:
define i64 @intrinsic_vcpop_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.i64.nxv64i1(
%a = call i64 @llvm.riscv.vcpop.i64.nxv64i1(
<vscale x 64 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
declare i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv64i1:
define i64 @intrinsic_vcpop_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a0, v9, v0.t
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)


@ -0,0 +1,142 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmandn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmandn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmandn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmandn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i1> @intrinsic_vmandn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i1> @intrinsic_vmandn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
define <vscale x 64 x i1> @intrinsic_vmandn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)
ret <vscale x 64 x i1> %a
}


@ -0,0 +1,142 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x i1> @intrinsic_vmandn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x i1> @intrinsic_vmandn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x i1> @intrinsic_vmandn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x i1> @intrinsic_vmandn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x i1> @intrinsic_vmandn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define <vscale x 32 x i1> @intrinsic_vmandn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define <vscale x 64 x i1> @intrinsic_vmandn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
ret <vscale x 64 x i1> %a
}


@ -1,142 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)
ret <vscale x 64 x i1> %a
}


@ -1,142 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
ret <vscale x 64 x i1> %a
}


@ -347,11 +347,11 @@ define <vscale x 16 x i1> @vmxnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 1
ret <vscale x 16 x i1> %not
}
define <vscale x 1 x i1> @vmandnot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmandnot_vv_nxv1i1:
define <vscale x 1 x i1> @vmandn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
@ -360,11 +360,11 @@ define <vscale x 1 x i1> @vmandnot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1
ret <vscale x 1 x i1> %vc
}
define <vscale x 2 x i1> @vmandnot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmandnot_vv_nxv2i1:
define <vscale x 2 x i1> @vmandn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
@ -373,11 +373,11 @@ define <vscale x 2 x i1> @vmandnot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2
ret <vscale x 2 x i1> %vc
}
define <vscale x 4 x i1> @vmandnot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmandnot_vv_nxv4i1:
define <vscale x 4 x i1> @vmandn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
@ -386,11 +386,11 @@ define <vscale x 4 x i1> @vmandnot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4
ret <vscale x 4 x i1> %vc
}
define <vscale x 8 x i1> @vmandnot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmandnot_vv_nxv8i1:
define <vscale x 8 x i1> @vmandn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
@ -399,11 +399,11 @@ define <vscale x 8 x i1> @vmandnot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8
ret <vscale x 8 x i1> %vc
}
define <vscale x 16 x i1> @vmandnot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmandnot_vv_nxv16i1:
define <vscale x 16 x i1> @vmandn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
@ -412,11 +412,11 @@ define <vscale x 16 x i1> @vmandnot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x
ret <vscale x 16 x i1> %vc
}
define <vscale x 1 x i1> @vmornot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmornot_vv_nxv1i1:
define <vscale x 1 x i1> @vmorn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
@ -425,11 +425,11 @@ define <vscale x 1 x i1> @vmornot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x
ret <vscale x 1 x i1> %vc
}
define <vscale x 2 x i1> @vmornot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmornot_vv_nxv2i1:
define <vscale x 2 x i1> @vmorn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
@ -438,11 +438,11 @@ define <vscale x 2 x i1> @vmornot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x
ret <vscale x 2 x i1> %vc
}
define <vscale x 4 x i1> @vmornot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmornot_vv_nxv4i1:
define <vscale x 4 x i1> @vmorn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
@ -451,11 +451,11 @@ define <vscale x 4 x i1> @vmornot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x
ret <vscale x 4 x i1> %vc
}
define <vscale x 8 x i1> @vmornot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmornot_vv_nxv8i1:
define <vscale x 8 x i1> @vmorn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
@ -464,11 +464,11 @@ define <vscale x 8 x i1> @vmornot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x
ret <vscale x 8 x i1> %vc
}
define <vscale x 16 x i1> @vmornot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmornot_vv_nxv16i1:
define <vscale x 16 x i1> @vmorn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer


@ -0,0 +1,142 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmorn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmorn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmorn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmorn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i1> @intrinsic_vmorn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i1> @intrinsic_vmorn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
define <vscale x 64 x i1> @intrinsic_vmorn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)
ret <vscale x 64 x i1> %a
}
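The new vmorn tests mirror the vmandn ones with OR in place of AND: per the RVV 1.0 spec, vmorn.mm vd, vs2, vs1 (formerly vmornot.mm) computes vd.mask[i] = vs2.mask[i] || !vs1.mask[i]. A sketch of that identity with a hypothetical fixed-width helper (illustrative only, not part of this diff):

; Illustrative reference, not autogenerated: op1 | ~op2 per mask bit.
define <8 x i1> @morn_reference(<8 x i1> %op1, <8 x i1> %op2) {
  %not = xor <8 x i1> %op2, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
  %r = or <8 x i1> %op1, %not
  ret <8 x i1> %r
}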


@ -0,0 +1,142 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x i1> @intrinsic_vmorn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x i1> @intrinsic_vmorn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x i1> @intrinsic_vmorn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x i1> @intrinsic_vmorn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x i1> @intrinsic_vmorn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define <vscale x 32 x i1> @intrinsic_vmorn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define <vscale x 64 x i1> @intrinsic_vmorn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
ret <vscale x 64 x i1> %a
}


@ -1,142 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)
ret <vscale x 64 x i1> %a
}


@ -1,142 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmornot.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
ret <vscale x 64 x i1> %a
}


@ -2470,7 +2470,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
@ -2488,7 +2488,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
@ -2506,7 +2506,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
@ -2524,7 +2524,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
@ -2542,7 +2542,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
@ -2560,7 +2560,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
@ -2578,7 +2578,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
@ -2596,7 +2596,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
@ -2614,7 +2614,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
@ -2632,7 +2632,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
@ -2650,7 +2650,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscal
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
@ -2668,7 +2668,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
@ -2686,7 +2686,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
@ -2704,7 +2704,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
@ -2722,7 +2722,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
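These maskedoff expansions rest on the identity maskedoff & ~(a < b) == maskedoff & (a >= b): vmslt.vx (vmsltu.vx in the unsigned tests that follow) materializes the negated predicate, and the renamed vmandn.mm folds in the mask. A generic-IR sketch of the identity with a hypothetical helper (illustrative only, fixed-width types assumed):

; Illustrative only: and(maskedoff, not(a < b)) is and(maskedoff, a >= b),
; the equivalence behind the vmslt.vx + vmandn.mm sequences above.
define <4 x i1> @msge_maskedoff_reference(<4 x i1> %maskedoff, <4 x i32> %a, <4 x i32> %b) {
  %lt = icmp slt <4 x i32> %a, %b
  %notlt = xor <4 x i1> %lt, <i1 true, i1 true, i1 true, i1 true>
  %r = and <4 x i1> %maskedoff, %notlt
  ret <4 x i1> %r
}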


@ -2437,7 +2437,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
@ -2455,7 +2455,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
@ -2473,7 +2473,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
@ -2491,7 +2491,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
@ -2509,7 +2509,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
@ -2527,7 +2527,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
@ -2545,7 +2545,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
@ -2563,7 +2563,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
@ -2581,7 +2581,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
@ -2599,7 +2599,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
@ -2617,7 +2617,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscal
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
@ -2635,7 +2635,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
@ -2653,7 +2653,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
@ -2671,7 +2671,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
@ -2689,7 +2689,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
@ -2707,7 +2707,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
@ -2725,7 +2725,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
@ -2743,7 +2743,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(


@ -2470,7 +2470,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
@ -2488,7 +2488,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
@ -2506,7 +2506,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
@ -2524,7 +2524,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
@ -2542,7 +2542,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
@ -2560,7 +2560,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
@ -2578,7 +2578,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
@ -2596,7 +2596,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
@ -2614,7 +2614,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
@ -2632,7 +2632,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
@ -2650,7 +2650,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vsca
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
@ -2668,7 +2668,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
@ -2686,7 +2686,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
@ -2704,7 +2704,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
@ -2722,7 +2722,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(


@ -2437,7 +2437,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
@ -2455,7 +2455,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
@ -2473,7 +2473,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
@ -2491,7 +2491,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
@ -2509,7 +2509,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
@ -2527,7 +2527,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
@ -2545,7 +2545,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
@ -2563,7 +2563,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
@ -2581,7 +2581,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
@ -2599,7 +2599,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
@ -2617,7 +2617,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vsca
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
@ -2635,7 +2635,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
@ -2653,7 +2653,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
@ -2671,7 +2671,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
@ -2689,7 +2689,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
@ -2707,7 +2707,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v8
; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
@ -2725,7 +2725,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v10
; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
@ -2743,7 +2743,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmandnot.mm v0, v0, v12
; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(


@ -10,7 +10,7 @@ define signext i1 @vpreduce_and_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vsc
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -27,7 +27,7 @@ define signext i1 @vpreduce_or_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vsca
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -45,7 +45,7 @@ define signext i1 @vpreduce_xor_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vsc
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -62,7 +62,7 @@ define signext i1 @vpreduce_and_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vsc
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -79,7 +79,7 @@ define signext i1 @vpreduce_or_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vsca
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -97,7 +97,7 @@ define signext i1 @vpreduce_xor_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vsc
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -114,7 +114,7 @@ define signext i1 @vpreduce_and_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vsc
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -131,7 +131,7 @@ define signext i1 @vpreduce_or_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vsca
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -149,7 +149,7 @@ define signext i1 @vpreduce_xor_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vsc
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -166,7 +166,7 @@ define signext i1 @vpreduce_and_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vsc
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -183,7 +183,7 @@ define signext i1 @vpreduce_or_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vsca
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -201,7 +201,7 @@ define signext i1 @vpreduce_xor_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vsc
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -218,7 +218,7 @@ define signext i1 @vpreduce_and_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <v
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -235,7 +235,7 @@ define signext i1 @vpreduce_or_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <vs
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -253,7 +253,7 @@ define signext i1 @vpreduce_xor_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <v
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -270,7 +270,7 @@ define signext i1 @vpreduce_and_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <v
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -287,7 +287,7 @@ define signext i1 @vpreduce_or_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <vs
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -305,7 +305,7 @@ define signext i1 @vpreduce_xor_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <v
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
@ -322,7 +322,7 @@ define signext i1 @vpreduce_and_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <v
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
@ -339,7 +339,7 @@ define signext i1 @vpreduce_or_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <vs
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
@ -357,7 +357,7 @@ define signext i1 @vpreduce_xor_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <v
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vpopc.m a1, v9, v0.t
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0

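The vpreduce_* checks above fold a VP mask or-reduction into a masked population count: the reduction is true iff any active bit is set. A minimal C sketch of that pattern follows; the unmasked form vcpop(op1, vl) is the one the intrinsic tests exercise, while the masked overload vcpop(mask, op1, vl) used here is an assumption.

#include <riscv_vector.h>
#include <stddef.h>

// Sketch only: mirrors the "vcpop.m a1, v9, v0.t" + snez + or sequence
// checked above. The masked overload vcpop(mask, op1, vl) is assumed.
_Bool vp_reduce_or_b8(_Bool s, vbool8_t v, vbool8_t m, size_t vl) {
  return (vcpop(m, v, vl) != 0) | s;
}
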
@ -8,7 +8,7 @@ define signext i1 @vreduce_or_nxv1i1(<vscale x 1 x i1> %v) {
; CHECK-LABEL: vreduce_or_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -22,7 +22,7 @@ define signext i1 @vreduce_xor_nxv1i1(<vscale x 1 x i1> %v) {
; CHECK-LABEL: vreduce_xor_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -37,7 +37,7 @@ define signext i1 @vreduce_and_nxv1i1(<vscale x 1 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -51,7 +51,7 @@ define signext i1 @vreduce_or_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK-LABEL: vreduce_or_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -65,7 +65,7 @@ define signext i1 @vreduce_xor_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK-LABEL: vreduce_xor_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -80,7 +80,7 @@ define signext i1 @vreduce_and_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -94,7 +94,7 @@ define signext i1 @vreduce_or_nxv4i1(<vscale x 4 x i1> %v) {
; CHECK-LABEL: vreduce_or_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -108,7 +108,7 @@ define signext i1 @vreduce_xor_nxv4i1(<vscale x 4 x i1> %v) {
; CHECK-LABEL: vreduce_xor_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -123,7 +123,7 @@ define signext i1 @vreduce_and_nxv4i1(<vscale x 4 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -137,7 +137,7 @@ define signext i1 @vreduce_or_nxv8i1(<vscale x 8 x i1> %v) {
; CHECK-LABEL: vreduce_or_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -151,7 +151,7 @@ define signext i1 @vreduce_xor_nxv8i1(<vscale x 8 x i1> %v) {
; CHECK-LABEL: vreduce_xor_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -166,7 +166,7 @@ define signext i1 @vreduce_and_nxv8i1(<vscale x 8 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -180,7 +180,7 @@ define signext i1 @vreduce_or_nxv16i1(<vscale x 16 x i1> %v) {
; CHECK-LABEL: vreduce_or_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -194,7 +194,7 @@ define signext i1 @vreduce_xor_nxv16i1(<vscale x 16 x i1> %v) {
; CHECK-LABEL: vreduce_xor_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -209,7 +209,7 @@ define signext i1 @vreduce_and_nxv16i1(<vscale x 16 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -223,7 +223,7 @@ define signext i1 @vreduce_or_nxv32i1(<vscale x 32 x i1> %v) {
; CHECK-LABEL: vreduce_or_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -237,7 +237,7 @@ define signext i1 @vreduce_xor_nxv32i1(<vscale x 32 x i1> %v) {
; CHECK-LABEL: vreduce_xor_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -252,7 +252,7 @@ define signext i1 @vreduce_and_nxv32i1(<vscale x 32 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -266,7 +266,7 @@ define signext i1 @vreduce_or_nxv64i1(<vscale x 64 x i1> %v) {
; CHECK-LABEL: vreduce_or_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -280,7 +280,7 @@ define signext i1 @vreduce_xor_nxv64i1(<vscale x 64 x i1> %v) {
; CHECK-LABEL: vreduce_xor_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vpopc.m a0, v0
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
@ -295,7 +295,7 @@ define signext i1 @vreduce_and_nxv64i1(<vscale x 64 x i1> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
; CHECK-NEXT: vpopc.m a0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret

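For the full-VL vreduce_* cases above, all three mask reductions come down to scalar tests on a population count. A minimal C sketch using the renamed intrinsics; vcpop(op1, vl) matches the intrinsic tests, while the vmnand operand order is an assumption.

#include <riscv_vector.h>
#include <stddef.h>

_Bool reduce_or_b8(vbool8_t v, size_t vl)  { return vcpop(v, vl) != 0; } // snez
_Bool reduce_xor_b8(vbool8_t v, size_t vl) { return vcpop(v, vl) & 1; }  // andi a0, a0, 1
_Bool reduce_and_b8(vbool8_t v, size_t vl) {
  vbool8_t nv = vmnand(v, v, vl);  // vmnand.mm v8, v0, v0 == ~v
  return vcpop(nv, vl) == 0;       // seqz: all bits set iff ~v has none set
}
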
@ -6,7 +6,7 @@ define <vscale x 1 x i1> @vselect_nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>
; CHECK-LABEL: vselect_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -18,7 +18,7 @@ define <vscale x 2 x i1> @vselect_nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1>
; CHECK-LABEL: vselect_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -30,7 +30,7 @@ define <vscale x 4 x i1> @vselect_nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1>
; CHECK-LABEL: vselect_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -42,7 +42,7 @@ define <vscale x 8 x i1> @vselect_nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1>
; CHECK-LABEL: vselect_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -54,7 +54,7 @@ define <vscale x 16 x i1> @vselect_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x
; CHECK-LABEL: vselect_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -66,7 +66,7 @@ define <vscale x 32 x i1> @vselect_nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x
; CHECK-LABEL: vselect_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
@ -78,7 +78,7 @@ define <vscale x 64 x i1> @vselect_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x
; CHECK-LABEL: vselect_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmandnot.mm v8, v8, v9
; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret

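The vselect_* checks above implement select on mask registers as (a & cc) | (b & ~cc), which is where the renamed vmandn shows up. A hedged C sketch of the same combination; the overloaded operand order (result = op1 & ~op2 for vmandn) is an assumption.

#include <riscv_vector.h>
#include <stddef.h>

vbool8_t select_b8(vbool8_t cc, vbool8_t a, vbool8_t b, size_t vl) {
  vbool8_t t = vmand(a, cc, vl);   // vmand.mm:  lanes taken from a
  vbool8_t f = vmandn(b, cc, vl);  // vmandn.mm: b & ~cc, lanes taken from b
  return vmor(t, f, vl);           // vmor.mm:   combine the two halves
}
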
@ -91,7 +91,7 @@
ret <vscale x 1 x i64> %d
}
define void @vsetvli_vpopc() {
define void @vsetvli_vcpop() {
ret void
}
@ -442,7 +442,7 @@ body: |
...
---
name: vsetvli_vpopc
name: vsetvli_vcpop
tracksRegLiveness: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
@ -458,7 +458,7 @@ registers:
- { id: 10, class: gpr, preferred-register: '' }
- { id: 11, class: vr, preferred-register: '' }
body: |
; CHECK-LABEL: name: vsetvli_vpopc
; CHECK-LABEL: name: vsetvli_vcpop
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x11
@ -479,9 +479,9 @@ body: |
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5, 0, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVPOPC_M_B1_:%[0-9]+]]:gpr = PseudoVPOPC_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0, implicit $vl, implicit $vtype
; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
; CHECK-NEXT: BEQ killed [[PseudoVPOPC_M_B1_]], [[COPY2]], %bb.3
; CHECK-NEXT: BEQ killed [[PseudoVCPOP_M_B1_]], [[COPY2]], %bb.3
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
@ -511,7 +511,7 @@ body: |
%5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5
$v0 = COPY %5
%6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0
%7:gpr = PseudoVPOPC_M_B1 %5, -1, 0
%7:gpr = PseudoVCPOP_M_B1 %5, -1, 0
%8:gpr = COPY $x0
BEQ killed %7, %8, %bb.3
PseudoBR %bb.2

@ -90,3 +90,12 @@ vfredsum.vs v8, v4, v20, v0.t
# ALIAS: vfwredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0xc4]
# NO-ALIAS: vfwredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0xc4]
vfwredsum.vs v8, v4, v20, v0.t
# ALIAS: vcpop.m a2, v4, v0.t # encoding: [0x57,0x26,0x48,0x40]
# NO-ALIAS: vcpop.m a2, v4, v0.t # encoding: [0x57,0x26,0x48,0x40]
vpopc.m a2, v4, v0.t
# ALIAS: vmandn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x62]
# NO-ALIAS: vmandn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x62]
vmandnot.mm v8, v4, v20
# ALIAS: vmorn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x72]
# NO-ALIAS: vmorn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x72]
vmornot.mm v8, v4, v20

@ -421,7 +421,7 @@ vmsge.vx v8, v4, a0, v0.t
vmsgeu.vx v0, v4, a0, v0.t, v2
# CHECK-INST: vmsltu.vx v2, v4, a0, v0.t
# CHECK-INST: vmandnot.mm v0, v0, v2
# CHECK-INST: vmandn.mm v0, v0, v2
# CHECK-ENCODING: [0x57,0x41,0x45,0x68]
# CHECK-ENCODING: [0x57,0x20,0x01,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
@ -430,7 +430,7 @@ vmsgeu.vx v0, v4, a0, v0.t, v2
vmsge.vx v0, v4, a0, v0.t, v2
# CHECK-INST: vmslt.vx v2, v4, a0, v0.t
# CHECK-INST: vmandnot.mm v0, v0, v2
# CHECK-INST: vmandn.mm v0, v0, v2
# CHECK-ENCODING: [0x57,0x41,0x45,0x6c]
# CHECK-ENCODING: [0x57,0x20,0x01,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
@ -439,8 +439,8 @@ vmsge.vx v0, v4, a0, v0.t, v2
vmsgeu.vx v9, v4, a0, v0.t, v2
# CHECK-INST: vmsltu.vx v2, v4, a0
# CHECK-INST: vmandnot.mm v2, v0, v2
# CHECK-INST: vmandnot.mm v9, v9, v0
# CHECK-INST: vmandn.mm v2, v0, v2
# CHECK-INST: vmandn.mm v9, v9, v0
# CHECK-INST: vmor.mm v9, v2, v9
# CHECK-ENCODING: [0x57,0x41,0x45,0x6a]
# CHECK-ENCODING: [0x57,0x21,0x01,0x62]
@ -454,8 +454,8 @@ vmsgeu.vx v9, v4, a0, v0.t, v2
vmsge.vx v8, v4, a0, v0.t, v2
# CHECK-INST: vmslt.vx v2, v4, a0
# CHECK-INST: vmandnot.mm v2, v0, v2
# CHECK-INST: vmandnot.mm v8, v8, v0
# CHECK-INST: vmandn.mm v2, v0, v2
# CHECK-INST: vmandn.mm v8, v8, v0
# CHECK-INST: vmor.mm v8, v2, v8
# CHECK-ENCODING: [0x57,0x41,0x45,0x6e]
# CHECK-ENCODING: [0x57,0x21,0x01,0x62]

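In mask algebra, the masked vmsge.vx expansion checked above computes new = (v0 & ~(vs2 < rs1)) | (vd & ~v0). A C sketch of that logic, not the assembler's expansion itself; the vmslt overload and the vmandn operand order are assumptions.

#include <riscv_vector.h>
#include <stddef.h>

vbool8_t vmsge_vx_masked(vbool8_t v0, vbool8_t vd, vint8m1_t vs2, int8_t rs1,
                         size_t vl) {
  vbool8_t lt = vmslt(vs2, rs1, vl);  // vmslt.vx v2, v4, a0
  vbool8_t t  = vmandn(v0, lt, vl);   // vmandn.mm v2, v0, v2: active and >=
  vbool8_t f  = vmandn(vd, v0, vl);   // vmandn.mm v8, v8, v0: inactive keep vd
  return vmor(t, f, vl);              // vmor.mm v8, v2, v8
}
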
@ -20,8 +20,8 @@ vmnand.mm v8, v4, v20
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 76 <unknown>
vmandnot.mm v8, v4, v20
# CHECK-INST: vmandnot.mm v8, v4, v20
vmandn.mm v8, v4, v20
# CHECK-INST: vmandn.mm v8, v4, v20
# CHECK-ENCODING: [0x57,0x24,0x4a,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 62 <unknown>
@ -44,8 +44,8 @@ vmnor.mm v8, v4, v20
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 7a <unknown>
vmornot.mm v8, v4, v20
# CHECK-INST: vmornot.mm v8, v4, v20
vmorn.mm v8, v4, v20
# CHECK-INST: vmorn.mm v8, v4, v20
# CHECK-ENCODING: [0x57,0x24,0x4a,0x72]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 72 <unknown>
@ -56,14 +56,14 @@ vmxnor.mm v8, v4, v20
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 7e <unknown>
vpopc.m a2, v4, v0.t
# CHECK-INST: vpopc.m a2, v4, v0.t
vcpop.m a2, v4, v0.t
# CHECK-INST: vcpop.m a2, v4, v0.t
# CHECK-ENCODING: [0x57,0x26,0x48,0x40]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 26 48 40 <unknown>
vpopc.m a2, v4
# CHECK-INST: vpopc.m a2, v4
vcpop.m a2, v4
# CHECK-INST: vcpop.m a2, v4
# CHECK-ENCODING: [0x57,0x26,0x48,0x42]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 26 48 42 <unknown>