[LegalizeTypes][VP] Add promotion support for binary VP ops

This patch extends the preliminary support for vector-predicated (VP)
operation legalization to include promotion of illegal integer vector
types.

Integer promotion of binary VP operations is relatively simple: it
piggy-backs on the existing non-VP logic, passing the two extra operands
(the mask and the explicit vector length, EVL) through to the promoted
operation unchanged.
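
Concretely, each affected helper gains an IsVP flag and, when it is set,
rebuilds the node with the mask (operand 2) and EVL (operand 3) forwarded
as-is. A minimal sketch of the tail shared by the helpers changed below
(the free-standing function and its name are illustrative, not part of the
patch itself):

  // Assumes the usual SelectionDAG environment, i.e.
  // #include "llvm/CodeGen/SelectionDAG.h" and namespace llvm.
  // The data operands have already been promoted by the caller; only the
  // element type changes, so the mask and EVL operands pass through as-is.
  static SDValue promoteBinOpTail(SelectionDAG &DAG, SDNode *N, SDValue LHS,
                                  SDValue RHS, bool IsVP) {
    if (!IsVP)
      return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS,
                         RHS);
    return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
                       N->getOperand(2), N->getOperand(3));
  }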

Tests have been added to the RISC-V target to cover the basic scenarios
for integer promotion for both fixed- and scalable-vector types.
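
The i7 element type used in the new tests is illegal for every vector
configuration, so it always forces promotion (to i8). The shift and mask
sequences in the checked assembly are the standard in-register extension
idioms; a scalar model, for illustration only (the helper names are not
from the patch):

  #include <cstdint>

  // Sign-extend a 7-bit payload held in an int8_t: shift it to the top,
  // then arithmetic-shift back, replicating bit 6 into bit 7. In the
  // vector output this appears as vadd.vv v, v, v (a doubling, i.e. a
  // left shift by one) followed by vsra.vi v, v, 1.
  int8_t sextI7(int8_t X) { return (int8_t)(X << 1) >> 1; }

  // Zero-extend a 7-bit payload: clear bit 7. This is the vand.vx with
  // 127 seen in the unsigned division/remainder and shift-amount tests.
  uint8_t zextI7(uint8_t X) { return X & 0x7f; }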

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D108288
Author: Fraser Cormack
Date:   2021-08-18 11:50:55 +01:00
Parent: 8872c9d1ca
Commit: 2c5568a6a9

28 changed files with 532 additions and 72 deletions


@@ -81,15 +81,23 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::STRICT_FSETCCS:
   case ISD::SETCC: Res = PromoteIntRes_SETCC(N); break;
   case ISD::SMIN:
-  case ISD::SMAX: Res = PromoteIntRes_SExtIntBinOp(N); break;
+  case ISD::SMAX:
+    Res = PromoteIntRes_SExtIntBinOp(N, /*IsVP*/ false);
+    break;
   case ISD::UMIN:
   case ISD::UMAX: Res = PromoteIntRes_UMINUMAX(N); break;
-  case ISD::SHL: Res = PromoteIntRes_SHL(N); break;
+  case ISD::SHL:
+    Res = PromoteIntRes_SHL(N, /*IsVP*/ false);
+    break;
   case ISD::SIGN_EXTEND_INREG:
     Res = PromoteIntRes_SIGN_EXTEND_INREG(N); break;
-  case ISD::SRA: Res = PromoteIntRes_SRA(N); break;
-  case ISD::SRL: Res = PromoteIntRes_SRL(N); break;
+  case ISD::SRA:
+    Res = PromoteIntRes_SRA(N, /*IsVP*/ false);
+    break;
+  case ISD::SRL:
+    Res = PromoteIntRes_SRL(N, /*IsVP*/ false);
+    break;
   case ISD::TRUNCATE: Res = PromoteIntRes_TRUNCATE(N); break;
   case ISD::UNDEF: Res = PromoteIntRes_UNDEF(N); break;
   case ISD::VAARG: Res = PromoteIntRes_VAARG(N); break;
@@ -144,13 +152,19 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::XOR:
   case ISD::ADD:
   case ISD::SUB:
-  case ISD::MUL: Res = PromoteIntRes_SimpleIntBinOp(N); break;
+  case ISD::MUL:
+    Res = PromoteIntRes_SimpleIntBinOp(N, /*IsVP*/ false);
+    break;
   case ISD::SDIV:
-  case ISD::SREM: Res = PromoteIntRes_SExtIntBinOp(N); break;
+  case ISD::SREM:
+    Res = PromoteIntRes_SExtIntBinOp(N, /*IsVP*/ false);
+    break;
   case ISD::UDIV:
-  case ISD::UREM: Res = PromoteIntRes_ZExtIntBinOp(N); break;
+  case ISD::UREM:
+    Res = PromoteIntRes_ZExtIntBinOp(N, /*IsVP*/ false);
+    break;
   case ISD::SADDO:
   case ISD::SSUBO: Res = PromoteIntRes_SADDSUBO(N, ResNo); break;
@@ -233,6 +247,32 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::FSHR:
     Res = PromoteIntRes_FunnelShift(N);
     break;
+  case ISD::VP_AND:
+  case ISD::VP_OR:
+  case ISD::VP_XOR:
+  case ISD::VP_ADD:
+  case ISD::VP_SUB:
+  case ISD::VP_MUL:
+    Res = PromoteIntRes_SimpleIntBinOp(N, /*IsVP*/ true);
+    break;
+  case ISD::VP_SDIV:
+  case ISD::VP_SREM:
+    Res = PromoteIntRes_SExtIntBinOp(N, /*IsVP*/ true);
+    break;
+  case ISD::VP_UDIV:
+  case ISD::VP_UREM:
+    Res = PromoteIntRes_ZExtIntBinOp(N, /*IsVP*/ true);
+    break;
+  case ISD::VP_SHL:
+    Res = PromoteIntRes_SHL(N, /*IsVP*/ true);
+    break;
+  case ISD::VP_ASHR:
+    Res = PromoteIntRes_SRA(N, /*IsVP*/ true);
+    break;
+  case ISD::VP_LSHR:
+    Res = PromoteIntRes_SRL(N, /*IsVP*/ true);
+    break;
   }

   // If the result is null then the sub-method took care of registering it.
@@ -1103,12 +1143,15 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) {
   return DAG.getSExtOrTrunc(SetCC, dl, NVT);
 }

-SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N, bool IsVP) {
   SDValue LHS = GetPromotedInteger(N->getOperand(0));
   SDValue RHS = N->getOperand(1);
   if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
     RHS = ZExtPromotedInteger(RHS);
-  return DAG.getNode(ISD::SHL, SDLoc(N), LHS.getValueType(), LHS, RHS);
+  if (!IsVP)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+  return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
+                     N->getOperand(2), N->getOperand(3));
 }

 SDValue DAGTypeLegalizer::PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N) {
@@ -1117,30 +1160,36 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N) {
                      Op.getValueType(), Op, N->getOperand(1));
 }

-SDValue DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N, bool IsVP) {
   // The input may have strange things in the top bits of the registers, but
   // these operations don't care. They may have weird bits going out, but
   // that too is okay if they are integer operations.
   SDValue LHS = GetPromotedInteger(N->getOperand(0));
   SDValue RHS = GetPromotedInteger(N->getOperand(1));
-  return DAG.getNode(N->getOpcode(), SDLoc(N),
-                     LHS.getValueType(), LHS, RHS);
+  if (!IsVP)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+  return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
+                     N->getOperand(2), N->getOperand(3));
 }

-SDValue DAGTypeLegalizer::PromoteIntRes_SExtIntBinOp(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SExtIntBinOp(SDNode *N, bool IsVP) {
   // Sign extend the input.
   SDValue LHS = SExtPromotedInteger(N->getOperand(0));
   SDValue RHS = SExtPromotedInteger(N->getOperand(1));
-  return DAG.getNode(N->getOpcode(), SDLoc(N),
-                     LHS.getValueType(), LHS, RHS);
+  if (!IsVP)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+  return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
+                     N->getOperand(2), N->getOperand(3));
 }

-SDValue DAGTypeLegalizer::PromoteIntRes_ZExtIntBinOp(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_ZExtIntBinOp(SDNode *N, bool IsVP) {
   // Zero extend the input.
   SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
   SDValue RHS = ZExtPromotedInteger(N->getOperand(1));
-  return DAG.getNode(N->getOpcode(), SDLoc(N),
-                     LHS.getValueType(), LHS, RHS);
+  if (!IsVP)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+  return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
+                     N->getOperand(2), N->getOperand(3));
 }

 SDValue DAGTypeLegalizer::PromoteIntRes_UMINUMAX(SDNode *N) {
@@ -1152,22 +1201,28 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UMINUMAX(SDNode *N) {
                      LHS.getValueType(), LHS, RHS);
 }

-SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N, bool IsVP) {
   // The input value must be properly sign extended.
   SDValue LHS = SExtPromotedInteger(N->getOperand(0));
   SDValue RHS = N->getOperand(1);
   if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
     RHS = ZExtPromotedInteger(RHS);
-  return DAG.getNode(ISD::SRA, SDLoc(N), LHS.getValueType(), LHS, RHS);
+  if (!IsVP)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+  return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
+                     N->getOperand(2), N->getOperand(3));
 }

-SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N, bool IsVP) {
   // The input value must be properly zero extended.
   SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
   SDValue RHS = N->getOperand(1);
   if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
     RHS = ZExtPromotedInteger(RHS);
-  return DAG.getNode(ISD::SRL, SDLoc(N), LHS.getValueType(), LHS, RHS);
+  if (!IsVP)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+  return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
+                     N->getOperand(2), N->getOperand(3));
 }

 SDValue DAGTypeLegalizer::PromoteIntRes_Rotate(SDNode *N) {


@@ -332,14 +332,14 @@ private:
   SDValue PromoteIntRes_VSELECT(SDNode *N);
   SDValue PromoteIntRes_SELECT_CC(SDNode *N);
   SDValue PromoteIntRes_SETCC(SDNode *N);
-  SDValue PromoteIntRes_SHL(SDNode *N);
-  SDValue PromoteIntRes_SimpleIntBinOp(SDNode *N);
-  SDValue PromoteIntRes_ZExtIntBinOp(SDNode *N);
-  SDValue PromoteIntRes_SExtIntBinOp(SDNode *N);
+  SDValue PromoteIntRes_SHL(SDNode *N, bool IsVP);
+  SDValue PromoteIntRes_SimpleIntBinOp(SDNode *N, bool IsVP);
+  SDValue PromoteIntRes_ZExtIntBinOp(SDNode *N, bool IsVP);
+  SDValue PromoteIntRes_SExtIntBinOp(SDNode *N, bool IsVP);
   SDValue PromoteIntRes_UMINUMAX(SDNode *N);
   SDValue PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N);
-  SDValue PromoteIntRes_SRA(SDNode *N);
-  SDValue PromoteIntRes_SRL(SDNode *N);
+  SDValue PromoteIntRes_SRA(SDNode *N, bool IsVP);
+  SDValue PromoteIntRes_SRL(SDNode *N, bool IsVP);
   SDValue PromoteIntRes_TRUNCATE(SDNode *N);
   SDValue PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo);
   SDValue PromoteIntRes_ADDSUBCARRY(SDNode *N, unsigned ResNo);


@@ -4,6 +4,18 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.add.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.add.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.add.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
@@ -395,17 +407,17 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    addi a0, a1, -128
 ; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    mv a3, zero
-; CHECK-NEXT:    bltu a1, a0, .LBB30_2
+; CHECK-NEXT:    bltu a1, a0, .LBB31_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a0
-; CHECK-NEXT:  .LBB30_2:
+; CHECK-NEXT:  .LBB31_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
-; CHECK-NEXT:    bltu a1, a2, .LBB30_4
+; CHECK-NEXT:    bltu a1, a2, .LBB31_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    addi a1, zero, 128
-; CHECK-NEXT:  .LBB30_4:
+; CHECK-NEXT:  .LBB31_4:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
@@ -421,17 +433,17 @@ define <256 x i8> @vadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a1, a0, -128
 ; CHECK-NEXT:    mv a2, zero
-; CHECK-NEXT:    bltu a0, a1, .LBB31_2
+; CHECK-NEXT:    bltu a0, a1, .LBB32_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a1
-; CHECK-NEXT:  .LBB31_2:
+; CHECK-NEXT:  .LBB32_2:
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    addi a1, zero, 128
 ; CHECK-NEXT:    vadd.vi v16, v16, -1
-; CHECK-NEXT:    bltu a0, a1, .LBB31_4
+; CHECK-NEXT:    bltu a0, a1, .LBB32_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    addi a0, zero, 128
-; CHECK-NEXT:  .LBB31_4:
+; CHECK-NEXT:  .LBB32_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
@@ -1520,17 +1532,17 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    addi a2, a0, -16
 ; RV32-NEXT:    vmv.v.i v24, -1
-; RV32-NEXT:    bltu a0, a2, .LBB106_2
+; RV32-NEXT:    bltu a0, a2, .LBB107_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:  .LBB106_2:
+; RV32-NEXT:  .LBB107_2:
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    addi a1, zero, 16
 ; RV32-NEXT:    vadd.vv v16, v16, v24, v0.t
-; RV32-NEXT:    bltu a0, a1, .LBB106_4
+; RV32-NEXT:    bltu a0, a1, .LBB107_4
 ; RV32-NEXT:  # %bb.3:
 ; RV32-NEXT:    addi a0, zero, 16
-; RV32-NEXT:  .LBB106_4:
+; RV32-NEXT:  .LBB107_4:
 ; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v1
 ; RV32-NEXT:    vadd.vv v8, v8, v24, v0.t
@@ -1543,17 +1555,17 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    addi a2, a0, -16
 ; RV64-NEXT:    vmv1r.v v25, v0
 ; RV64-NEXT:    vslidedown.vi v0, v0, 2
-; RV64-NEXT:    bltu a0, a2, .LBB106_2
+; RV64-NEXT:    bltu a0, a2, .LBB107_2
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:  .LBB106_2:
+; RV64-NEXT:  .LBB107_2:
 ; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    addi a1, zero, 16
 ; RV64-NEXT:    vadd.vi v16, v16, -1, v0.t
-; RV64-NEXT:    bltu a0, a1, .LBB106_4
+; RV64-NEXT:    bltu a0, a1, .LBB107_4
 ; RV64-NEXT:  # %bb.3:
 ; RV64-NEXT:    addi a0, zero, 16
-; RV64-NEXT:  .LBB106_4:
+; RV64-NEXT:  .LBB107_4:
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV64-NEXT:    vmv1r.v v0, v25
 ; RV64-NEXT:    vadd.vi v8, v8, -1, v0.t
@@ -1572,17 +1584,17 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    addi a2, a0, -16
 ; RV32-NEXT:    vmv.v.i v24, -1
-; RV32-NEXT:    bltu a0, a2, .LBB107_2
+; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:  .LBB107_2:
+; RV32-NEXT:  .LBB108_2:
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    addi a1, zero, 16
 ; RV32-NEXT:    vadd.vv v16, v16, v24
-; RV32-NEXT:    bltu a0, a1, .LBB107_4
+; RV32-NEXT:    bltu a0, a1, .LBB108_4
 ; RV32-NEXT:  # %bb.3:
 ; RV32-NEXT:    addi a0, zero, 16
-; RV32-NEXT:  .LBB107_4:
+; RV32-NEXT:  .LBB108_4:
 ; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV32-NEXT:    vadd.vv v8, v8, v24
 ; RV32-NEXT:    ret
@@ -1591,17 +1603,17 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi a1, a0, -16
 ; RV64-NEXT:    mv a2, zero
-; RV64-NEXT:    bltu a0, a1, .LBB107_2
+; RV64-NEXT:    bltu a0, a1, .LBB108_2
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    mv a2, a1
-; RV64-NEXT:  .LBB107_2:
+; RV64-NEXT:  .LBB108_2:
 ; RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV64-NEXT:    addi a1, zero, 16
 ; RV64-NEXT:    vadd.vi v16, v16, -1
-; RV64-NEXT:    bltu a0, a1, .LBB107_4
+; RV64-NEXT:    bltu a0, a1, .LBB108_4
 ; RV64-NEXT:  # %bb.3:
 ; RV64-NEXT:    addi a0, zero, 16
-; RV64-NEXT:  .LBB107_4:
+; RV64-NEXT:  .LBB108_4:
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV64-NEXT:    vadd.vi v8, v8, -1
 ; RV64-NEXT:    ret


@@ -4,6 +4,18 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.and.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.and.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.and.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vand_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,23 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.sdiv.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vdiv_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdiv_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v25, v9, v9
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v26, v8, v8
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vdiv.vv v8, v26, v25, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.sdiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vdiv_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,22 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vdivu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdivu_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vx v25, v9, a1
+; CHECK-NEXT:    vand.vx v26, v8, a1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vdivu.vv v8, v26, v25, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vdivu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,18 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.mul.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vmul_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.mul.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.mul.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vmul_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,18 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.or.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.or.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.or.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,23 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.srem.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vrem_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v25, v9, v9
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v26, v8, v8
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vrem.vv v8, v26, v25, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.srem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.srem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vrem_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,22 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.urem.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vremu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vx v25, v9, a1
+; CHECK-NEXT:    vand.vx v26, v8, a1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vremu.vv v8, v26, v25, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.urem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.urem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vremu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,21 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.shl.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vsll_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsll_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vx v25, v9, a1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsll.vv v8, v8, v25, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.shl.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.shl.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vsll_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,23 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsra_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vx v25, v9, a1
+; CHECK-NEXT:    vadd.vv v26, v8, v8
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsra.vv v8, v26, v25, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vsra_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,22 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsrl_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vx v25, v9, a1
+; CHECK-NEXT:    vand.vx v26, v8, a1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsrl.vv v8, v26, v25, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vsrl_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,18 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.sub.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vsub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.sub.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.sub.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vsub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,18 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <8 x i7> @llvm.vp.xor.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vv_v8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <8 x i7> @llvm.vp.xor.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i7> %v
+}
+
 declare <2 x i8> @llvm.vp.xor.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
 
 define <2 x i8> @vxor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,20 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.add.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vadd_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.add.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vadd_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
@@ -622,20 +636,20 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:    bltu a1, a2, .LBB48_2
+; CHECK-NEXT:    bltu a1, a2, .LBB49_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:  .LBB48_2:
+; CHECK-NEXT:  .LBB49_2:
 ; CHECK-NEXT:    mv a4, zero
 ; CHECK-NEXT:    vsetvli a5, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    sub a0, a1, a2
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT:    bltu a1, a0, .LBB48_4
+; CHECK-NEXT:    bltu a1, a0, .LBB49_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a4, a0
-; CHECK-NEXT:  .LBB48_4:
+; CHECK-NEXT:  .LBB49_4:
 ; CHECK-NEXT:    vsetvli zero, a4, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
@@ -652,18 +666,18 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    mv a2, a0
-; CHECK-NEXT:    bltu a0, a1, .LBB49_2
+; CHECK-NEXT:    bltu a0, a1, .LBB50_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a1
-; CHECK-NEXT:  .LBB49_2:
+; CHECK-NEXT:  .LBB50_2:
 ; CHECK-NEXT:    mv a3, zero
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    sub a1, a0, a1
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
-; CHECK-NEXT:    bltu a0, a1, .LBB49_4
+; CHECK-NEXT:    bltu a0, a1, .LBB50_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:  .LBB49_4:
+; CHECK-NEXT:  .LBB50_4:
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v16, v16, -1
 ; CHECK-NEXT:    ret
@@ -1526,16 +1540,16 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
 ; CHECK-NEXT:    sub a3, a0, a1
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vslidedown.vx v0, v0, a4
-; CHECK-NEXT:    bltu a0, a3, .LBB116_2
+; CHECK-NEXT:    bltu a0, a3, .LBB117_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a3
-; CHECK-NEXT:  .LBB116_2:
+; CHECK-NEXT:  .LBB117_2:
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB116_4
+; CHECK-NEXT:    bltu a0, a1, .LBB117_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB116_4:
+; CHECK-NEXT:  .LBB117_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
@@ -1561,16 +1575,16 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 ; CHECK-NEXT:    sub a3, a0, a1
 ; CHECK-NEXT:    vmv1r.v v26, v25
 ; CHECK-NEXT:    vslidedown.vx v0, v25, a4
-; CHECK-NEXT:    bltu a0, a3, .LBB117_2
+; CHECK-NEXT:    bltu a0, a3, .LBB118_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a3
-; CHECK-NEXT:  .LBB117_2:
+; CHECK-NEXT:  .LBB118_2:
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB117_4
+; CHECK-NEXT:    bltu a0, a1, .LBB118_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB117_4:
+; CHECK-NEXT:  .LBB118_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
@@ -1601,16 +1615,16 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <v
 ; CHECK-NEXT:    sub a3, a0, a1
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vslidedown.vx v0, v0, a4
-; CHECK-NEXT:    bltu a0, a3, .LBB118_2
+; CHECK-NEXT:    bltu a0, a3, .LBB119_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a3
-; CHECK-NEXT:  .LBB118_2:
+; CHECK-NEXT:  .LBB119_2:
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB118_4
+; CHECK-NEXT:    bltu a0, a1, .LBB119_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB118_4:
+; CHECK-NEXT:  .LBB119_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t


@@ -4,6 +4,20 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.and.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vand_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.and.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vand_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,26 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.sdiv.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vdiv_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vdiv_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v25, v8, v8
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vadd.vv v26, v26, v26
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vdiv.vv v8, v25, v26, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.sdiv.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.sdiv.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vdiv_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,25 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.udiv.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vdivu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vdivu_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a2, zero, 127
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vand.vx v25, v8, a2
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vand.vx v26, v26, a2
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vdivu.vv v8, v25, v26, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.udiv.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vdivu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,20 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.mul.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vmul_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.mul.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,20 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.or.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vor_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vor.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.or.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vor_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,26 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.srem.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vrem_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v25, v8, v8
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vadd.vv v26, v26, v26
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vrem.vv v8, v25, v26, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.srem.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,25 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.urem.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vremu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a2, zero, 127
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vand.vx v25, v8, a2
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vand.vx v26, v26, a2
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vremu.vv v8, v25, v26, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.urem.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vremu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,24 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.shl.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vsll_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vsll_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    vand.vx v25, v25, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsll.vv v8, v8, v25, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.shl.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vsll_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,26 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.ashr.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vsra_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vsra_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v25, v8, v8
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    vand.vx v26, v26, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsra.vv v8, v25, v26, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.ashr.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vsra_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,25 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.lshr.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vsrl_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vsrl_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a2, zero, 127
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vand.vx v25, v8, a2
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vand.vx v26, v26, a2
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsrl.vv v8, v25, v26, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.lshr.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vsrl_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,20 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.sub.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vsub_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.sub.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {


@@ -4,6 +4,20 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
+declare <vscale x 8 x i7> @llvm.vp.xor.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vxor_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vx_nxv8i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 8 x i7> undef, i7 %b, i32 0
+  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> undef, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x i7> @llvm.vp.xor.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i7> %v
+}
+
 declare <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vxor_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {