[RISCV] Support mask policy for RVV IR intrinsics.

Add the UsesMaskPolicy flag to indicate that an operation's result
would be affected by the mask policy (e.g., masked operations).

This means RISCVInsertVSETVLI should decide the mask policy according
to the mask policy operand or the passthru operand.
If UsesMaskPolicy is false (e.g., unmasked, store, and reduction
operations), the mask policy could be either mask undisturbed or mask
agnostic. Currently, RISCVInsertVSETVLI defaults operations that use the
mask policy to MA and everything else to MU, so the current mask policy
behavior is unchanged for unmasked operations.
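
As a minimal IR sketch (the function name is made up; the masked vadd
intrinsic declaration is copied from the test at the end of this patch),
the trailing immediate on a masked intrinsic is the policy operand: bit 0
requests tail agnostic and bit 1 requests mask agnostic, so 3 requests
TAMA.

declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i64, i64);

define <vscale x 8 x i8> @vadd_mask_tama(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i64 %vl) {
entry:
  ; Passthru is undef and the policy immediate is 3
  ; (TAIL_AGNOSTIC | MASK_AGNOSTIC), so RISCVInsertVSETVLI should be able
  ; to select "ta, ma" for the vsetvli it inserts.
  %r = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %a,
    <vscale x 8 x i8> %b,
    <vscale x 8 x i1> %m,
    i64 %vl, i64 3)
  ret <vscale x 8 x i8> %r
}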

Add masked-tama, masked-tamu, masked-tuma, and masked-tumu test cases.
Not every operation is covered, because most implementations share the
same pseudo multiclass. Some cases may be duplicated across different
tests (e.g., masked vmacc with tumu appears in both vmacc-rv32.ll and
masked-tumu), but having separate tests dedicated to the policy should
make the testing clearer.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D120226
Zakk Chen 2022-02-18 22:19:15 -08:00
parent 0ff19b1905
commit abb5a985e9
10 changed files with 4955 additions and 107 deletions

@ -219,7 +219,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For unit stride load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSLoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
@ -231,7 +231,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For unit stride fault-only-first load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
@ -255,7 +255,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For strided load with mask
// Input: (maskedoff, pointer, stride, mask, vl, ta)
// Input: (maskedoff, pointer, stride, mask, vl, policy)
class RISCVSLoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
@ -277,7 +277,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For indexed load with mask
// Input: (maskedoff, pointer, index, mask, vl, ta)
// Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVILoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
@ -358,7 +358,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For destination vector type is the same as first source vector (with mask).
// Input: (vector_in, mask, vl, ta)
// Input: (vector_in, vector_in, mask, vl, policy)
class RISCVUnaryAAMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
@ -367,7 +367,8 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
class RISCVUnaryAAMaskTU
// Input: (passthru, vector_in, mask, vl)
class RISCVCompress
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
@ -392,7 +393,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int_vector_in, vl, ta)
// Input: (vector_in, vector_in, int_vector_in, vl, policy)
class RISCVRGatherVVMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
@ -411,7 +412,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int16_vector_in, vl, ta)
// Input: (vector_in, vector_in, int16_vector_in, vl, policy)
class RISCVRGatherEI16VVMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
@ -433,7 +434,7 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is the same as first source vector (with mask).
// Second operand is XLen.
// Input: (maskedoff, vector_in, xlen_in, mask, vl, ta)
// Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
class RISCVGatherVXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@ -453,7 +454,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@ -475,7 +476,7 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@ -495,7 +496,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is NOT the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
@ -517,7 +518,7 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is NOT the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
@ -615,7 +616,7 @@ let TargetPrefix = "riscv" in {
}
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@ -639,7 +640,7 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@ -662,7 +663,7 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations with mask.
// The destination vector type is NOT the same as first source vector (with mask).
// The second source operand matches the destination type or is an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryABShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
@ -671,6 +672,7 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
let VLOperand = 4;
}
// Input: (vector_in, vector_in, vector_in/scalar_in, vl)
class RISCVTernaryAAAXNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@ -678,6 +680,7 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
// Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVTernaryAAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@ -686,6 +689,7 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 4;
}
// NoMask Vector Multiply-Add operations; the first operand cannot be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryAAXANoMask
: Intrinsic<[llvm_anyvector_ty],
@ -695,6 +699,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 3;
}
// Mask Vector Multiply-Add operations; the first operand cannot be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryAAXAMask
: Intrinsic<[llvm_anyvector_ty],
@ -705,6 +710,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 4;
}
// NoMask Widening Vector Multiply-Add operations; the first operand cannot be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryWideNoMask
: Intrinsic< [llvm_anyvector_ty],
@ -714,6 +720,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 3;
}
// Mask Widening Vector Multiply-Add operations; the first operand cannot be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryWideMask
: Intrinsic< [llvm_anyvector_ty],
@ -772,7 +779,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For destination vector type is NOT the same as source vector (with mask).
// Input: (maskedoff, vector_in, mask, vl, ta)
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVUnaryABMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
@ -824,7 +831,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For Conversion unary operations with mask.
// Input: (maskedoff, vector_in, mask, vl, ta)
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVConversionMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
@ -844,7 +851,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 1;
}
// For unit stride segment load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSSegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
@ -870,7 +877,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 1;
}
// For unit stride fault-only-first segment load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
@ -896,7 +903,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For stride segment load with mask
// Input: (maskedoff, pointer, offset, mask, vl, ta)
// Input: (maskedoff, pointer, offset, mask, vl, policy)
class RISCVSSegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
@ -920,7 +927,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For indexed segment load with mask
// Input: (maskedoff, pointer, index, mask, vl, ta)
// Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVISegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
@ -1360,7 +1367,7 @@ let TargetPrefix = "riscv" in {
defm vrgather_vx : RISCVRGatherVX;
defm vrgatherei16_vv : RISCVRGatherEI16VV;
def "int_riscv_vcompress" : RISCVUnaryAAMaskTU;
def "int_riscv_vcompress" : RISCVCompress;
defm vaaddu : RISCVSaturatingBinaryAAX;
defm vaadd : RISCVSaturatingBinaryAAX;

@ -88,6 +88,13 @@ enum {
// Pseudos.
IsRVVWideningReductionShift = HasVecPolicyOpShift + 1,
IsRVVWideningReductionMask = 1 << IsRVVWideningReductionShift,
// Does this instruction care about the mask policy? If it does not, the mask
// policy could be either agnostic or undisturbed. For example, the results of
// unmasked, store, and reduction operations are not affected by the mask
// policy, so the compiler is free to select either one.
UsesMaskPolicyShift = IsRVVWideningReductionShift + 1,
UsesMaskPolicyMask = 1 << UsesMaskPolicyShift,
};
// Match with the definitions in RISCVInstrFormatsV.td
@ -110,8 +117,8 @@ enum VLMUL : uint8_t {
};
enum {
TAIL_UNDISTURBED = 0,
TAIL_AGNOSTIC = 1,
MASK_AGNOSTIC = 2,
};
// Helper functions to read TSFlags.
@ -156,6 +163,10 @@ static inline bool hasVecPolicyOp(uint64_t TSFlags) {
static inline bool isRVVWideningReduction(uint64_t TSFlags) {
return TSFlags & IsRVVWideningReductionMask;
}
/// \returns true if the mask policy is valid for the instruction.
static inline bool UsesMaskPolicy(uint64_t TSFlags) {
return TSFlags & UsesMaskPolicyMask;
}
// RISC-V Specific Machine Operand Flags
enum {
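
One practical consequence of the flag above (a sketch; the function name
is made up and the unmasked vadd intrinsic declaration is taken from the
test at the end of this patch): an operation that does not use the mask
policy gives the compiler the freedom described in the comment, so either
mu or ma is acceptable in the vsetvli, and the pass currently keeps mu.

declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i64);

define <vscale x 8 x i8> @vadd_unmasked(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i64 %vl) {
entry:
  ; Unmasked: no mask operand and no mask-policy operand, so the result
  ; cannot depend on the mask policy. RISCVInsertVSETVLI is free to pick
  ; either "mu" or "ma" for this vsetvli; today it defaults to "mu".
  %r = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %a,
    <vscale x 8 x i8> %b,
    i64 %vl)
  ret <vscale x 8 x i8> %r
}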

@ -502,29 +502,46 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
unsigned NumOperands = MI.getNumExplicitOperands();
bool HasPolicy = RISCVII::hasVecPolicyOp(TSFlags);
// Default to tail agnostic unless the destination is tied to a source.
// Unless the source is undef. In that case the user would have some control
// over the tail values. Some pseudo instructions force a tail agnostic policy
// despite having a tied def.
// If the instruction has policy argument, use the argument.
// If there is no policy argument, default to tail agnostic unless the
// destination is tied to a source. Unless the source is undef. In that case
// the user would have some control over the policy values. Some pseudo
// instructions force a tail agnostic policy despite having a tied def.
bool ForceTailAgnostic = RISCVII::doesForceTailAgnostic(TSFlags);
bool TailAgnostic = true;
// If the instruction has policy argument, use the argument.
bool UsesMaskPolicy = RISCVII::UsesMaskPolicy(TSFlags);
// FIXME: Could we look at the instructions above or below to choose a
// matching mask policy and reduce the number of vsetvli instructions? The
// default mask policy is agnostic if the instruction uses the mask policy,
// otherwise undisturbed. Because most masked operations are mask undisturbed,
// we could possibly reduce the vsetvli instructions between masked and
// unmasked instruction sequences.
bool MaskAgnostic = UsesMaskPolicy;
unsigned UseOpIdx;
if (HasPolicy) {
const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
TailAgnostic = Op.getImm() & 0x1;
}
unsigned UseOpIdx;
if (!(ForceTailAgnostic || (HasPolicy && TailAgnostic)) &&
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
uint64_t Policy = Op.getImm();
assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
"Invalid Policy Value");
// Although in some cases a mismatched passthru/maskedoff and policy value
// does not make sense (e.g., a tied operand that is IMPLICIT_DEF with a
// non-TAMA policy, or a tied operand that is not IMPLICIT_DEF with a TAMA
// policy), the user has set the policy value explicitly, so the compiler
// does not fix it.
TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
} else if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
TailAgnostic = false;
if (UsesMaskPolicy)
MaskAgnostic = false;
// If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg());
if (UseMI) {
UseMI = elideCopies(UseMI, MRI);
if (UseMI && UseMI->isImplicitDef())
if (UseMI && UseMI->isImplicitDef()) {
TailAgnostic = true;
if (UsesMaskPolicy)
MaskAgnostic = true;
}
}
}
@ -559,8 +576,8 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
}
} else
InstrInfo.setAVLReg(RISCV::NoRegister);
InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
/*MaskAgnostic*/ false, MaskRegOp, StoreOp, ScalarMovOp);
InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, MaskRegOp, StoreOp,
ScalarMovOp);
return InstrInfo;
}
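
As a worked example of the decision flow above (the function name and
the expected vsetvli are illustrative assumptions; the masked vadd
declaration is the one from the test at the end of this patch): with a
non-undef passthru and a policy immediate of 2, only MASK_AGNOSTIC is
set, so TailAgnostic should come out false and MaskAgnostic true.

declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i64, i64);

define <vscale x 8 x i8> @vadd_mask_tuma(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i64 %vl) {
entry:
  ; Policy = 2 (MASK_AGNOSTIC only): the expectation is something like
  ;   vsetvli zero, a0, e8, m1, tu, ma
  %r = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %passthru,
    <vscale x 8 x i8> %a,
    <vscale x 8 x i8> %b,
    <vscale x 8 x i1> %m,
    i64 %vl, i64 2)
  ret <vscale x 8 x i8> %r
}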

@ -197,6 +197,9 @@ class RVInst<dag outs, dag ins, string opcodestr, string argstr,
bit IsRVVWideningReduction = 0;
let TSFlags{17} = IsRVVWideningReduction;
bit UsesMaskPolicy = 0;
let TSFlags{18} = UsesMaskPolicy;
}
// Pseudo instructions

@ -685,6 +685,7 @@ class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -733,6 +734,7 @@ class VPseudoSLoadMask<VReg RetClass, int EEW>:
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -787,6 +789,7 @@ class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -912,6 +915,7 @@ class VPseudoNullaryMask<VReg RegClass>:
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -973,6 +977,7 @@ class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -989,6 +994,7 @@ class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -1164,6 +1170,7 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -1184,6 +1191,9 @@ class VPseudoBinaryMOutMask<VReg RetClass,
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
// FIXME: In the current design we do not change the mask policy, so
// UsesMaskPolicy is false. We could fix this after adding the policy operand.
let UsesMaskPolicy = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -1206,6 +1216,7 @@ class VPseudoTiedBinaryMask<VReg RetClass,
let HasSEWOp = 1;
let HasMergeOp = 0; // Merge is also rs2.
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -1325,6 +1336,7 @@ class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -1358,6 +1370,7 @@ class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -1397,6 +1410,7 @@ class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,67 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64);
define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2)
ret <vscale x 8 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64, i64);
define <vscale x 8 x i8> @intrinsic_vadd_mask_tu(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_tu:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i64 %4, i64 0)
ret <vscale x 8 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vadd_mask_ta(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_ta:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i64 %4, i64 1)
ret <vscale x 8 x i8> %a
}