[RISCV] Provide VLOperand in td.
Currently, users expect VL to be the last operand. However, since some intrinsics have a tail policy in the last operand, this rule can no longer be used.

Reviewed By: craig.topper, frasercrmck

Differential Revision: https://reviews.llvm.org/D117452
This commit is contained in:
parent 3fc4b5896a
commit ec9cb3a79c
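For readers skimming the diff, here is a tiny, self-contained C++ model (not LLVM code; the operand list and names are purely illustrative) of the problem the commit message describes: once a tail-policy operand follows VL, "take the last operand" picks the wrong value, while a per-intrinsic VL index recorded in the .td file stays correct.

```cpp
// Illustrative model only -- not LLVM code. "Operand" and the sample
// operand list below are made up for demonstration.
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

using Operand = std::string;

// Old rule: assume VL is always the last operand.
const Operand &vlByLastOperand(const std::vector<Operand> &Ops) {
  return Ops.back();
}

// New rule: each intrinsic records the index of its VL operand.
const Operand &vlByRecordedIndex(const std::vector<Operand> &Ops,
                                 std::size_t VLOperand) {
  return Ops[VLOperand];
}

int main() {
  // A masked operation with inputs (maskedoff, vector_in, mask, vl, ta):
  // VL sits at index 3 and is followed by the tail-policy immediate.
  std::vector<Operand> Ops = {"maskedoff", "vector_in", "mask", "vl", "ta"};

  assert(vlByLastOperand(Ops) == "ta");      // wrong: picks the tail policy
  assert(vlByRecordedIndex(Ops, 3) == "vl"); // right: uses the td-provided index
  return 0;
}
```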
@@ -123,11 +123,16 @@ let TargetPrefix = "riscv" in {
// The intrinsic does not have any operand that must be extended.
defvar NoSplatOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> SplatOperand = NoSplatOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
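A side note on the defaults above: SplatOperand is a 4-bit field whose all-ones value 0xF means "no splat operand", and VLOperand is a 5-bit field whose all-ones value 0x1F means "no VL operand", leaving 0-30 as valid argument indices. A minimal C++ model of that convention (the record type and example values are assumptions that mirror the td defaults, not the actual generated table):

```cpp
// Minimal sketch of the sentinel convention: all-ones in each field means
// "this intrinsic does not have that operand".
#include <cassert>
#include <cstdint>

constexpr uint8_t NoSplatOperand = 0xF;  // 4-bit sentinel
constexpr uint8_t NoVLOperand = 0x1F;    // 5-bit sentinel

struct IntrinsicRecord {
  uint8_t SplatOperand = NoSplatOperand;
  uint8_t VLOperand = NoVLOperand;
  bool hasSplatOperand() const { return SplatOperand != NoSplatOperand; }
  bool hasVLOperand() const { return VLOperand != NoVLOperand; }
};

int main() {
  IntrinsicRecord NoVL;            // e.g. riscv_vmv_x_s has no VL operand
  IntrinsicRecord UnitStrideLoad;  // unit stride load: VL is argument 1
  UnitStrideLoad.VLOperand = 1;

  assert(!NoVL.hasVLOperand());
  assert(UnitStrideLoad.hasVLOperand() && UnitStrideLoad.VLOperand == 1);
  return 0;
}
```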
|
@ -152,7 +157,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMPointerType<LLVMMatchType<0>>,
|
||||
llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For unit stride fault-only-first load
|
||||
// Input: (pointer, vl)
|
||||
// Output: (data, vl)
|
||||
|
@ -162,7 +169,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<0>>]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For unit stride load with mask
|
||||
// Input: (maskedoff, pointer, mask, vl, ta)
|
||||
class RISCVUSLoadMask
|
||||
|
@ -172,7 +181,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For unit stride fault-only-first load with mask
|
||||
// Input: (maskedoff, pointer, mask, vl, ta)
|
||||
// Output: (data, vl)
|
||||
|
@ -184,14 +195,18 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
LLVMMatchType<1>, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For strided load
|
||||
// Input: (pointer, stride, vl)
|
||||
class RISCVSLoad
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMPointerType<LLVMMatchType<0>>,
|
||||
llvm_anyint_ty, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For strided load with mask
|
||||
// Input: (maskedoff, pointer, stride, mask, vl, ta)
|
||||
class RISCVSLoadMask
|
||||
|
@ -201,14 +216,18 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
|
||||
LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For indexed load
|
||||
// Input: (pointer, index, vl)
|
||||
class RISCVILoad
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMPointerType<LLVMMatchType<0>>,
|
||||
llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For indexed load with mask
|
||||
// Input: (maskedoff, pointer, index, mask, vl, ta)
|
||||
class RISCVILoadMask
|
||||
|
@ -218,7 +237,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<2>],
|
||||
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For unit stride store
|
||||
// Input: (vector_in, pointer, vl)
|
||||
class RISCVUSStore
|
||||
|
@ -226,7 +247,9 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For unit stride store with mask
|
||||
// Input: (vector_in, pointer, mask, vl)
|
||||
class RISCVUSStoreMask
|
||||
|
@ -235,7 +258,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For strided store
|
||||
// Input: (vector_in, pointer, stride, vl)
|
||||
class RISCVSStore
|
||||
|
@ -243,7 +268,9 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
llvm_anyint_ty, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For stride store with mask
|
||||
// Input: (vector_in, pointer, stirde, mask, vl)
|
||||
class RISCVSStoreMask
|
||||
|
@ -251,7 +278,9 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For indexed store
|
||||
// Input: (vector_in, pointer, index, vl)
|
||||
class RISCVIStore
|
||||
|
@ -259,7 +288,9 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
llvm_anyint_ty, llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For indexed store with mask
|
||||
// Input: (vector_in, pointer, index, mask, vl)
|
||||
class RISCVIStoreMask
|
||||
|
@ -267,13 +298,17 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For destination vector type is the same as source vector.
|
||||
// Input: (vector_in, vl)
|
||||
class RISCVUnaryAANoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For destination vector type is the same as first source vector (with mask).
|
||||
// Input: (vector_in, mask, vl, ta)
|
||||
class RISCVUnaryAAMask
|
||||
|
@ -281,24 +316,32 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, LLVMMatchType<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<1>],
|
||||
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
class RISCVUnaryAAMaskNoTA
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For destination vector type is the same as first and second source vector.
|
||||
// Input: (vector_in, vector_in, vl)
|
||||
class RISCVBinaryAAANoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is the same as first and second source vector.
|
||||
// Input: (vector_in, int_vector_in, vl)
|
||||
class RISCVRGatherVVNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is the same as first and second source vector.
|
||||
// Input: (vector_in, vector_in, int_vector_in, vl, ta)
|
||||
class RISCVRGatherVVMask
|
||||
|
@ -306,22 +349,28 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<1>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// Input: (vector_in, int16_vector_in, vl)
|
||||
class RISCVRGatherEI16VVNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is the same as first and second source vector.
|
||||
// Input: (vector_in, vector_in, int16_vector_in, vl, ta)
|
||||
class RISCVRGatherEI16VVMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<1>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For destination vector type is the same as first source vector, and the
|
||||
// second operand is XLen.
|
||||
// Input: (vector_in, xlen_in, vl)
|
||||
|
@ -329,6 +378,7 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is the same as first source vector (with mask).
|
||||
// Second operand is XLen.
|
||||
|
@ -339,6 +389,7 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
|
||||
LLVMMatchType<1>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For destination vector type is the same as first source vector.
|
||||
// Input: (vector_in, vector_in/scalar_in, vl)
|
||||
|
@ -347,6 +398,7 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is the same as first source vector (with mask).
|
||||
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
|
||||
|
@ -357,6 +409,7 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMMatchType<2>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 2;
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For destination vector type is the same as first source vector. The
|
||||
// second source operand must match the destination type or be an XLen scalar.
|
||||
|
@ -364,7 +417,9 @@ let TargetPrefix = "riscv" in {
|
|||
class RISCVBinaryAAShiftNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is the same as first source vector (with mask).
|
||||
// The second source operand must match the destination type or be an XLen scalar.
|
||||
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
|
||||
|
@ -373,7 +428,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<2>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For destination vector type is NOT the same as first source vector.
|
||||
// Input: (vector_in, vector_in/scalar_in, vl)
|
||||
class RISCVBinaryABXNoMask
|
||||
|
@ -381,6 +438,7 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is NOT the same as first source vector (with mask).
|
||||
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
|
||||
|
@ -391,6 +449,7 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMMatchType<3>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 2;
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For destination vector type is NOT the same as first source vector. The
|
||||
// second source operand must match the destination type or be an XLen scalar.
|
||||
|
@ -398,7 +457,9 @@ let TargetPrefix = "riscv" in {
|
|||
class RISCVBinaryABShiftNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is NOT the same as first source vector (with mask).
|
||||
// The second source operand must match the destination type or be an XLen scalar.
|
||||
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
|
||||
|
@ -407,7 +468,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<3>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For binary operations with V0 as input.
|
||||
// Input: (vector_in, vector_in/scalar_in, V0, vl)
|
||||
class RISCVBinaryWithV0
|
||||
|
@ -417,6 +480,7 @@ let TargetPrefix = "riscv" in {
|
|||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For binary operations with mask type output and V0 as input.
|
||||
// Output: (mask type output)
|
||||
|
@ -428,6 +492,7 @@ let TargetPrefix = "riscv" in {
|
|||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For binary operations with mask type output.
|
||||
// Output: (mask type output)
|
||||
|
@ -437,6 +502,7 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For binary operations with mask type output without mask.
|
||||
// Output: (mask type output)
|
||||
|
@ -446,6 +512,7 @@ let TargetPrefix = "riscv" in {
|
|||
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For binary operations with mask type output with mask.
|
||||
// Output: (mask type output)
|
||||
|
@ -457,6 +524,7 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 2;
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For FP classify operations.
|
||||
// Output: (bit mask type output)
|
||||
|
@ -464,7 +532,9 @@ let TargetPrefix = "riscv" in {
|
|||
class RISCVClassifyNoMask
|
||||
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
|
||||
[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For FP classify operations with mask.
|
||||
// Output: (bit mask type output)
|
||||
// Input: (maskedoff, vector_in, mask, vl)
|
||||
|
@ -472,7 +542,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
|
||||
[LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For Saturating binary operations.
|
||||
// The destination vector type is the same as first source vector.
|
||||
// Input: (vector_in, vector_in/scalar_in, vl)
|
||||
|
@ -481,6 +553,7 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For Saturating binary operations with mask.
|
||||
// The destination vector type is the same as first source vector.
|
||||
|
@ -492,6 +565,7 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMMatchType<2>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 2;
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For Saturating binary operations.
|
||||
// The destination vector type is the same as first source vector.
|
||||
|
@ -500,7 +574,9 @@ let TargetPrefix = "riscv" in {
|
|||
class RISCVSaturatingBinaryAAShiftNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
|
||||
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For Saturating binary operations with mask.
|
||||
// The destination vector type is the same as first source vector.
|
||||
// The second source operand matches the destination type or is an XLen scalar.
|
||||
|
@ -510,7 +586,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<2>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For Saturating binary operations.
|
||||
// The destination vector type is NOT the same as first source vector.
|
||||
// The second source operand matches the destination type or is an XLen scalar.
|
||||
|
@ -518,7 +596,9 @@ let TargetPrefix = "riscv" in {
|
|||
class RISCVSaturatingBinaryABShiftNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
|
||||
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
|
||||
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For Saturating binary operations with mask.
|
||||
// The destination vector type is NOT the same as first source vector (with mask).
|
||||
// The second source operand matches the destination type or is an XLen scalar.
|
||||
|
@ -528,23 +608,30 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<3>],
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
class RISCVTernaryAAAXNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
|
||||
LLVMMatchType<1>],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
class RISCVTernaryAAAXMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
class RISCVTernaryAAXANoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 3;
|
||||
}
|
||||
class RISCVTernaryAAXAMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
|
@ -552,6 +639,7 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 4;
|
||||
}
|
||||
class RISCVTernaryWideNoMask
|
||||
: Intrinsic< [llvm_anyvector_ty],
|
||||
|
@ -559,6 +647,7 @@ let TargetPrefix = "riscv" in {
|
|||
llvm_anyint_ty],
|
||||
[IntrNoMem] >, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 3;
|
||||
}
|
||||
class RISCVTernaryWideMask
|
||||
: Intrinsic< [llvm_anyvector_ty],
|
||||
|
@ -566,6 +655,7 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let SplatOperand = 1;
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For Reduction ternary operations.
|
||||
// For destination vector type is the same as first and third source vector.
|
||||
|
@ -574,7 +664,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For Reduction ternary operations with mask.
|
||||
// For destination vector type is the same as first and third source vector.
|
||||
// The mask type come from second source vector.
|
||||
|
@ -583,27 +675,35 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
|
||||
LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 4;
|
||||
}
|
||||
// For unary operations with scalar type output without mask
|
||||
// Output: (scalar type)
|
||||
// Input: (vector_in, vl)
|
||||
class RISCVMaskUnarySOutNoMask
|
||||
: Intrinsic<[LLVMMatchType<1>],
|
||||
[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For unary operations with scalar type output with mask
|
||||
// Output: (scalar type)
|
||||
// Input: (vector_in, mask, vl)
|
||||
class RISCVMaskUnarySOutMask
|
||||
: Intrinsic<[LLVMMatchType<1>],
|
||||
[llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is NOT the same as source vector.
|
||||
// Input: (vector_in, vl)
|
||||
class RISCVUnaryABNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For destination vector type is NOT the same as source vector (with mask).
|
||||
// Input: (maskedoff, vector_in, mask, vl, ta)
|
||||
class RISCVUnaryABMask
|
||||
|
@ -611,14 +711,18 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, llvm_anyvector_ty,
|
||||
LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
|
||||
llvm_anyint_ty, LLVMMatchType<2>],
|
||||
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For unary operations with the same vector type in/out without mask
|
||||
// Output: (vector)
|
||||
// Input: (vector_in, vl)
|
||||
class RISCVUnaryNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For mask unary operations with mask type in/out with mask
|
||||
// Output: (mask type output)
|
||||
// Input: (mask type maskedoff, mask type vector_in, mask, vl)
|
||||
|
@ -626,19 +730,25 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<[llvm_anyint_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>,
|
||||
LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// Output: (vector)
|
||||
// Input: (vl)
|
||||
class RISCVNullaryIntrinsic
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 0;
|
||||
}
|
||||
// For Conversion unary operations.
|
||||
// Input: (vector_in, vl)
|
||||
class RISCVConversionNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For Conversion unary operations with mask.
|
||||
// Input: (maskedoff, vector_in, mask, vl, ta)
|
||||
class RISCVConversionMask
|
||||
|
@ -646,7 +756,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>, llvm_anyvector_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
|
||||
LLVMMatchType<2>],
|
||||
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
|
||||
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
|
||||
// For unit stride segment load
|
||||
// Input: (pointer, vl)
|
||||
|
@ -654,7 +766,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
|
||||
!add(nf, -1))),
|
||||
[LLVMPointerToElt<0>, llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For unit stride segment load with mask
|
||||
// Input: (maskedoff, pointer, mask, vl, ta)
|
||||
class RISCVUSSegLoadMask<int nf>
|
||||
|
@ -665,7 +779,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty, LLVMMatchType<1>]),
|
||||
[ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 2);
|
||||
}
|
||||
|
||||
// For unit stride fault-only-first segment load
|
||||
// Input: (pointer, vl)
|
||||
|
@ -676,7 +792,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
|
||||
!add(nf, -1)), [llvm_anyint_ty]),
|
||||
[LLVMPointerToElt<0>, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For unit stride fault-only-first segment load with mask
|
||||
// Input: (maskedoff, pointer, mask, vl, ta)
|
||||
// Output: (data, vl)
|
||||
|
@ -690,7 +808,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
LLVMMatchType<1>, LLVMMatchType<1>]),
|
||||
[ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 2);
|
||||
}
|
||||
|
||||
// For stride segment load
|
||||
// Input: (pointer, offset, vl)
|
||||
|
@ -698,7 +818,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
|
||||
!add(nf, -1))),
|
||||
[LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For stride segment load with mask
|
||||
// Input: (maskedoff, pointer, offset, mask, vl, ta)
|
||||
class RISCVSSegLoadMask<int nf>
|
||||
|
@ -710,7 +832,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
LLVMMatchType<1>, LLVMMatchType<1>]),
|
||||
[ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 3);
|
||||
}
|
||||
|
||||
// For indexed segment load
|
||||
// Input: (pointer, index, vl)
|
||||
|
@ -718,7 +842,9 @@ let TargetPrefix = "riscv" in {
|
|||
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
|
||||
!add(nf, -1))),
|
||||
[LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For indexed segment load with mask
|
||||
// Input: (maskedoff, pointer, index, mask, vl, ta)
|
||||
class RISCVISegLoadMask<int nf>
|
||||
|
@ -730,7 +856,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty, LLVMMatchType<2>]),
|
||||
[ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
|
||||
RISCVVIntrinsic;
|
||||
RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 3);
|
||||
}
|
||||
|
||||
// For unit stride segment store
|
||||
// Input: (value, pointer, vl)
|
||||
|
@ -739,7 +867,9 @@ let TargetPrefix = "riscv" in {
|
|||
!listconcat([llvm_anyvector_ty],
|
||||
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
|
||||
[LLVMPointerToElt<0>, llvm_anyint_ty]),
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 1);
|
||||
}
|
||||
// For unit stride segment store with mask
|
||||
// Input: (value, pointer, mask, vl)
|
||||
class RISCVUSSegStoreMask<int nf>
|
||||
|
@ -749,7 +879,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMPointerToElt<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty]),
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 2);
|
||||
}
|
||||
|
||||
// For stride segment store
|
||||
// Input: (value, pointer, offset, vl)
|
||||
|
@ -759,7 +891,9 @@ let TargetPrefix = "riscv" in {
|
|||
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
|
||||
[LLVMPointerToElt<0>, llvm_anyint_ty,
|
||||
LLVMMatchType<1>]),
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 2);
|
||||
}
|
||||
// For stride segment store with mask
|
||||
// Input: (value, pointer, offset, mask, vl)
|
||||
class RISCVSSegStoreMask<int nf>
|
||||
|
@ -769,7 +903,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMPointerToElt<0>, llvm_anyint_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
LLVMMatchType<1>]),
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 3);
|
||||
}
|
||||
|
||||
// For indexed segment store
|
||||
// Input: (value, pointer, offset, vl)
|
||||
|
@ -779,7 +915,9 @@ let TargetPrefix = "riscv" in {
|
|||
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
|
||||
[LLVMPointerToElt<0>, llvm_anyvector_ty,
|
||||
llvm_anyint_ty]),
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 2);
|
||||
}
|
||||
// For indexed segment store with mask
|
||||
// Input: (value, pointer, offset, mask, vl)
|
||||
class RISCVISegStoreMask<int nf>
|
||||
|
@ -789,7 +927,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMPointerToElt<0>, llvm_anyvector_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty]),
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
|
||||
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = !add(nf, 3);
|
||||
}
|
||||
|
||||
multiclass RISCVUSLoad {
|
||||
def "int_riscv_" # NAME : RISCVUSLoad;
|
||||
|
@ -1056,13 +1196,19 @@ let TargetPrefix = "riscv" in {
|
|||
|
||||
def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
|
||||
[LLVMVectorElementType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
|
||||
[LLVMVectorElementType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
|
||||
def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
|
||||
[llvm_anyint_ty],
|
||||
|
@ -1070,7 +1216,9 @@ let TargetPrefix = "riscv" in {
|
|||
def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
|
||||
[LLVMMatchType<0>, LLVMVectorElementType<0>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
|
||||
def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
|
||||
[llvm_anyfloat_ty],
|
||||
|
@ -1078,7 +1226,9 @@ let TargetPrefix = "riscv" in {
|
|||
def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
|
||||
[LLVMMatchType<0>, LLVMVectorElementType<0>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
|
||||
defm vfmul : RISCVBinaryAAX;
|
||||
defm vfdiv : RISCVBinaryAAX;
|
||||
|
@ -1215,7 +1365,9 @@ let TargetPrefix = "riscv" in {
|
|||
def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// Output: (vector)
|
||||
// Input: (maskedoff, mask type vector_in, mask, vl)
|
||||
def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
|
||||
|
@ -1223,7 +1375,9 @@ let TargetPrefix = "riscv" in {
|
|||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// Output: (vector)
|
||||
// Input: (vl)
|
||||
def int_riscv_vid : RISCVNullaryIntrinsic;
|
||||
|
@ -1234,7 +1388,9 @@ let TargetPrefix = "riscv" in {
|
|||
[LLVMMatchType<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic;
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
|
||||
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
|
||||
defm vlseg # nf : RISCVUSSegLoad<nf>;
|
||||
@@ -4203,8 +4203,7 @@ static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
    // We need to convert the scalar to a splat vector.
    // FIXME: Can we implicitly truncate the scalar if it is known to
    // be sign extended?
    // VL should be the last operand.
    SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
    SDValue VL = Op.getOperand(II->VLOperand + 1 + HasChain);
    assert(VL.getValueType() == XLenVT);
    ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
    return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
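The replacement line above converts the IR-level argument index stored in the intrinsic table into a SelectionDAG operand index: as I read the node layout, operand 0 is the chain when the intrinsic has one, the next operand is the intrinsic ID, and the IR arguments follow, hence VLOperand + 1 + HasChain. A small illustrative helper (not LLVM code; function name is hypothetical):

```cpp
// Maps an IR-level argument index of an intrinsic call to the operand index
// of the corresponding INTRINSIC_* SelectionDAG node: chain (if any) first,
// then the intrinsic ID, then the arguments.
#include <cassert>

unsigned sdnodeOperandIndex(unsigned IRArgIndex, bool HasChain) {
  return IRArgIndex + 1 /* intrinsic ID */ + (HasChain ? 1u : 0u);
}

int main() {
  // VL at IR argument index 2, intrinsic lowered with a chain:
  assert(sdnodeOperandIndex(2, /*HasChain=*/true) == 4);
  // Same argument index for a chain-less intrinsic:
  assert(sdnodeOperandIndex(2, /*HasChain=*/false) == 3);
  return 0;
}
```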
@@ -670,10 +670,15 @@ namespace RISCVVIntrinsicsTable {
struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t SplatOperand;
  uint8_t VLOperand;
  bool hasSplatOperand() const {
    // 0xF is not valid. See NoSplatOperand in IntrinsicsRISCV.td.
    return SplatOperand != 0xF;
  }
  bool hasVLOperand() const {
    // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.
    return VLOperand != 0x1F;
  }
};

using namespace RISCV;
@@ -419,7 +419,7 @@ def RISCVVPseudosTable : GenericTable {
def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "SplatOperand"];
  let Fields = ["IntrinsicID", "SplatOperand", "VLOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1804,10 +1804,9 @@ define <vscale x 1 x i64> @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1861,10 +1860,9 @@ define <vscale x 2 x i64> @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1918,10 +1916,9 @@ define <vscale x 4 x i64> @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1975,10 +1972,9 @@ define <vscale x 8 x i64> @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vand.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vand.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v10, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
|
||||
; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v12, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
|
||||
; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v16, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
|
||||
; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
|
|||
; CHECK-NEXT: addi sp, sp, -16
|
||||
; CHECK-NEXT: sw a1, 12(sp)
|
||||
; CHECK-NEXT: sw a0, 8(sp)
|
||||
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: addi a0, sp, 8
|
||||
; CHECK-NEXT: vlse64.v v24, (a0), zero
|
||||
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
|
||||
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
|
||||
; CHECK-NEXT: addi sp, sp, 16
|
||||
; CHECK-NEXT: ret
|
||||
|
|
|
@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -851,10 +851,9 @@ define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vsub.vv v8, v10, v9, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -909,10 +908,9 @@ define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vsub.vv v8, v12, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -967,10 +965,9 @@ define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vsub.vv v8, v16, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1025,10 +1022,9 @@ define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vsub.vv v8, v24, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1845,10 +1845,9 @@ define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1903,10 +1902,9 @@ define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -1961,10 +1959,9 @@ define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret

@ -2019,10 +2016,9 @@ define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v24, (a0), zero
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret