[AArch64] Add ARMv8.2-A FP16 vector intrinsics

Putting back the code that was reverted a few weeks ago.

Differential Revision: https://reviews.llvm.org/D34161

llvm-svn: 321294
parent 6e62834fef
commit f58a132eef
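As a quick illustration of what the patch enables (not part of the commit): the new intrinsics become available on AArch64 when compiling with something like -march=armv8.2-a+fp16. A minimal usage sketch, with illustrative function names only:

#include <arm_neon.h>

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
// Lane-wise fused multiply-add on 8 half-precision lanes, then a horizontal max.
float16_t fp16_demo(const float16_t *a, const float16_t *b, const float16_t *acc) {
  float16x8_t va = vld1q_f16(a);
  float16x8_t vb = vld1q_f16(b);
  float16x8_t vc = vld1q_f16(acc);
  float16x8_t r  = vfmaq_f16(vc, va, vb);  // r = vc + va * vb
  return vmaxvq_f16(r);                    // reduction intrinsic added by this patch
}
#endif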
@@ -227,6 +227,7 @@ def OP_UNAVAILABLE : Operation {
// u: unsigned integer (int/float args)
// f: float (int args)
// F: double (int args)
// H: half (int args)
// d: default
// g: default, ignore 'Q' size modifier.
// j: default, force 'Q' size modifier.

@@ -345,6 +346,7 @@ def OP_MLSLHi : Op<(call "vmlsl", $p0, (call "vget_high", $p1),
(call "vget_high", $p2))>;
def OP_MLSLHi_N : Op<(call "vmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MUL_N : Op<(op "*", $p0, (dup $p1))>;
def OP_MULX_N : Op<(call "vmulx", $p0, (dup $p1))>;
def OP_MLA_N : Op<(op "+", $p0, (op "*", $p1, (dup $p2)))>;
def OP_MLS_N : Op<(op "-", $p0, (op "*", $p1, (dup $p2)))>;
def OP_FMLA_N : Op<(call "vfma", $p0, $p1, (dup $p2))>;

@@ -1661,3 +1663,186 @@ def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "sssji", "SsSi", OP_SCALAR
def SCALAR_VDUP_LANE : IInst<"vdup_lane", "sdi", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "sji", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
}

// ARMv8.2-A FP16 intrinsics.
let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {

// ARMv8.2-A FP16 one-operand vector intrinsics.

// Comparison
def CMEQH : SInst<"vceqz", "ud", "hQh">;
def CMGEH : SInst<"vcgez", "ud", "hQh">;
def CMGTH : SInst<"vcgtz", "ud", "hQh">;
def CMLEH : SInst<"vclez", "ud", "hQh">;
def CMLTH : SInst<"vcltz", "ud", "hQh">;

// Vector conversion
def VCVT_F16 : SInst<"vcvt_f16", "Hd", "sUsQsQUs">;
def VCVT_S16 : SInst<"vcvt_s16", "xd", "hQh">;
def VCVT_U16 : SInst<"vcvt_u16", "ud", "hQh">;
def VCVTA_S16 : SInst<"vcvta_s16", "xd", "hQh">;
def VCVTA_U16 : SInst<"vcvta_u16", "ud", "hQh">;
def VCVTM_S16 : SInst<"vcvtm_s16", "xd", "hQh">;
def VCVTM_U16 : SInst<"vcvtm_u16", "ud", "hQh">;
def VCVTN_S16 : SInst<"vcvtn_s16", "xd", "hQh">;
def VCVTN_U16 : SInst<"vcvtn_u16", "ud", "hQh">;
def VCVTP_S16 : SInst<"vcvtp_s16", "xd", "hQh">;
def VCVTP_U16 : SInst<"vcvtp_u16", "ud", "hQh">;

// Vector rounding
def FRINTZH : SInst<"vrnd", "dd", "hQh">;
def FRINTNH : SInst<"vrndn", "dd", "hQh">;
def FRINTAH : SInst<"vrnda", "dd", "hQh">;
def FRINTPH : SInst<"vrndp", "dd", "hQh">;
def FRINTMH : SInst<"vrndm", "dd", "hQh">;
def FRINTXH : SInst<"vrndx", "dd", "hQh">;
def FRINTIH : SInst<"vrndi", "dd", "hQh">;

// Misc.
def VABSH : SInst<"vabs", "dd", "hQh">;
def VNEGH : SOpInst<"vneg", "dd", "hQh", OP_NEG>;
def VRECPEH : SInst<"vrecpe", "dd", "hQh">;
def FRSQRTEH : SInst<"vrsqrte", "dd", "hQh">;
def FSQRTH : SInst<"vsqrt", "dd", "hQh">;

// ARMv8.2-A FP16 two-operands vector intrinsics.

// Misc.
def VADDH : SOpInst<"vadd", "ddd", "hQh", OP_ADD>;
def VABDH : SInst<"vabd", "ddd", "hQh">;
def VSUBH : SOpInst<"vsub", "ddd", "hQh", OP_SUB>;

// Comparison
let InstName = "vacge" in {
def VCAGEH : SInst<"vcage", "udd", "hQh">;
def VCALEH : SInst<"vcale", "udd", "hQh">;
}
let InstName = "vacgt" in {
def VCAGTH : SInst<"vcagt", "udd", "hQh">;
def VCALTH : SInst<"vcalt", "udd", "hQh">;
}
def VCEQH : SOpInst<"vceq", "udd", "hQh", OP_EQ>;
def VCGEH : SOpInst<"vcge", "udd", "hQh", OP_GE>;
def VCGTH : SOpInst<"vcgt", "udd", "hQh", OP_GT>;
let InstName = "vcge" in
def VCLEH : SOpInst<"vcle", "udd", "hQh", OP_LE>;
let InstName = "vcgt" in
def VCLTH : SOpInst<"vclt", "udd", "hQh", OP_LT>;

// Vector conversion
let isVCVT_N = 1 in {
def VCVT_N_F16 : SInst<"vcvt_n_f16", "Hdi", "sUsQsQUs">;
def VCVT_N_S16 : SInst<"vcvt_n_s16", "xdi", "hQh">;
def VCVT_N_U16 : SInst<"vcvt_n_u16", "udi", "hQh">;
}

// Max/Min
def VMAXH : SInst<"vmax", "ddd", "hQh">;
def VMINH : SInst<"vmin", "ddd", "hQh">;
def FMAXNMH : SInst<"vmaxnm", "ddd", "hQh">;
def FMINNMH : SInst<"vminnm", "ddd", "hQh">;

// Multiplication/Division
def VMULH : SOpInst<"vmul", "ddd", "hQh", OP_MUL>;
def MULXH : SInst<"vmulx", "ddd", "hQh">;
def FDIVH : IOpInst<"vdiv", "ddd", "hQh", OP_DIV>;

// Pairwise addition
def VPADDH : SInst<"vpadd", "ddd", "hQh">;

// Pairwise Max/Min
def VPMAXH : SInst<"vpmax", "ddd", "hQh">;
def VPMINH : SInst<"vpmin", "ddd", "hQh">;
// Pairwise MaxNum/MinNum
def FMAXNMPH : SInst<"vpmaxnm", "ddd", "hQh">;
def FMINNMPH : SInst<"vpminnm", "ddd", "hQh">;

// Reciprocal/Sqrt
def VRECPSH : SInst<"vrecps", "ddd", "hQh">;
def VRSQRTSH : SInst<"vrsqrts", "ddd", "hQh">;

// ARMv8.2-A FP16 three-operands vector intrinsics.

// Vector fused multiply-add operations
def VFMAH : SInst<"vfma", "dddd", "hQh">;
def VFMSH : SOpInst<"vfms", "dddd", "hQh", OP_FMLS>;

// ARMv8.2-A FP16 lane vector intrinsics.

// FMA lane
def VFMA_LANEH : IInst<"vfma_lane", "dddgi", "hQh">;
def VFMA_LANEQH : IInst<"vfma_laneq", "dddji", "hQh">;

// FMA lane with scalar argument
def FMLA_NH : SOpInst<"vfma_n", "ddds", "hQh", OP_FMLA_N>;
// Scalar floating point fused multiply-add (scalar, by element)
def SCALAR_FMLA_LANEH : IInst<"vfma_lane", "sssdi", "Sh">;
def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "sssji", "Sh">;

// FMS lane
def VFMS_LANEH : IOpInst<"vfms_lane", "dddgi", "hQh", OP_FMS_LN>;
def VFMS_LANEQH : IOpInst<"vfms_laneq", "dddji", "hQh", OP_FMS_LNQ>;
// FMS lane with scalar argument
def FMLS_NH : SOpInst<"vfms_n", "ddds", "hQh", OP_FMLS_N>;
// Scalar floating point fused multiply-subtract (scalar, by element)
def SCALAR_FMLS_LANEH : IOpInst<"vfms_lane", "sssdi", "Sh", OP_FMS_LN>;
def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "sssji", "Sh", OP_FMS_LNQ>;

// Mul lane
def VMUL_LANEH : IOpInst<"vmul_lane", "ddgi", "hQh", OP_MUL_LN>;
def VMUL_LANEQH : IOpInst<"vmul_laneq", "ddji", "hQh", OP_MUL_LN>;
def VMUL_NH : IOpInst<"vmul_n", "dds", "hQh", OP_MUL_N>;
// Scalar floating point multiply (scalar, by element)
def SCALAR_FMUL_LANEH : IOpInst<"vmul_lane", "ssdi", "Sh", OP_SCALAR_MUL_LN>;
def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "ssji", "Sh", OP_SCALAR_MUL_LN>;

// Mulx lane
def VMULX_LANEH : IOpInst<"vmulx_lane", "ddgi", "hQh", OP_MULX_LN>;
def VMULX_LANEQH : IOpInst<"vmulx_laneq", "ddji", "hQh", OP_MULX_LN>;
def VMULX_NH : IOpInst<"vmulx_n", "dds", "hQh", OP_MULX_N>;
// TODO: Scalar floating point multiply extended (scalar, by element)
// The defs below are commented out because they need vmulx_f16(float16_t, float16_t),
// which will be implemented later with the fp16 scalar intrinsics (arm_fp16.h).
//def SCALAR_FMULX_LANEH : IOpInst<"vmulx_lane", "ssdi", "Sh", OP_SCALAR_MUL_LN>;
//def SCALAR_FMULX_LANEQH : IOpInst<"vmulx_laneq", "ssji", "Sh", OP_SCALAR_MUL_LN>;

// ARMv8.2-A FP16 reduction vector intrinsics.
def VMAXVH : SInst<"vmaxv", "sd", "hQh">;
def VMINVH : SInst<"vminv", "sd", "hQh">;
def FMAXNMVH : SInst<"vmaxnmv", "sd", "hQh">;
def FMINNMVH : SInst<"vminnmv", "sd", "hQh">;

// Data processing intrinsics - section 5

// Logical operations
let isHiddenLInst = 1 in
def VBSLH : SInst<"vbsl", "dudd", "hQh">;

// Transposition operations
def VZIPH : WInst<"vzip", "2dd", "hQh">;
def VUZPH : WInst<"vuzp", "2dd", "hQh">;
def VTRNH : WInst<"vtrn", "2dd", "hQh">;

// Set all lanes to same value.
/* Already implemented prior to ARMv8.2-A.
def VMOV_NH : WOpInst<"vmov_n", "ds", "hQh", OP_DUP>;
def VDUP_NH : WOpInst<"vdup_n", "ds", "hQh", OP_DUP>;
def VDUP_LANE1H : WOpInst<"vdup_lane", "dgi", "hQh", OP_DUP_LN>;*/

// Vector Extract
def VEXTH : WInst<"vext", "dddi", "hQh">;

// Reverse vector elements
def VREV64H : WOpInst<"vrev64", "dd", "hQh", OP_REV64>;

// Permutation
def VTRN1H : SOpInst<"vtrn1", "ddd", "hQh", OP_TRN1>;
def VZIP1H : SOpInst<"vzip1", "ddd", "hQh", OP_ZIP1>;
def VUZP1H : SOpInst<"vuzp1", "ddd", "hQh", OP_UZP1>;
def VTRN2H : SOpInst<"vtrn2", "ddd", "hQh", OP_TRN2>;
def VZIP2H : SOpInst<"vzip2", "ddd", "hQh", OP_ZIP2>;
def VUZP2H : SOpInst<"vuzp2", "ddd", "hQh", OP_UZP2>;

def SCALAR_VDUP_LANEH : IInst<"vdup_lane", "sdi", "Sh">;
def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "sji", "Sh">;
}
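A hedged reading of how a few of the records above surface in arm_neon.h (the first letter of a prototype string is the return type, so "ud" is an unsigned-integer result from one default-typed vector argument; signatures follow the ACLE FP16 spec, and the exact declarations come from the NEON emitter):

#include <arm_neon.h>

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
uint16x4_t  use_vceqz(float16x4_t a) { return vceqz_f16(a); }      // CMEQH, "ud", type "h"
int16x8_t   use_vcvt(float16x8_t a)  { return vcvtq_s16_f16(a); }  // VCVT_S16, "xd", type "Qh"
float16x4_t use_fma_lane(float16x4_t acc, float16x4_t x, float16x4_t v) {
  return vfma_lane_f16(acc, x, v, 3);                              // VFMA_LANEH, "dddgi"
}
#endif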
@@ -181,6 +181,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
  if (Unaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");

  switch (ArchKind) {
  default:
    break;
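Because the patch defines __ARM_FEATURE_FP16_VECTOR_ARITHMETIC (above) whenever NEON and full FP16 are available, user code can gate on that macro. A small hedged sketch with an illustrative float32 fallback path:

#include <arm_neon.h>

// Add two 4-lane half-precision vectors; widen through float32 when the
// ARMv8.2-A FP16 vector instructions are not available.
static inline float16x4_t add_f16(float16x4_t a, float16x4_t b) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  return vadd_f16(a, b);            // new intrinsic from this patch
#else
  return vcvt_f16_f32(vaddq_f32(vcvt_f32_f16(a), vcvt_f32_f16(b)));
#endif
}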
@@ -3386,8 +3386,9 @@ static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
  case NeonTypeFlags::Float16:
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Float16:
    return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
@@ -3410,6 +3411,8 @@ static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
                                          NeonTypeFlags IntTypeFlags) {
  int IsQuad = IntTypeFlags.isQuad();
  switch (IntTypeFlags.getEltType()) {
  case NeonTypeFlags::Int16:
    return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
  case NeonTypeFlags::Int64:
@ -3557,55 +3560,80 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
|
|||
NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
|
||||
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
|
||||
NEONMAP0(vcvt_f32_v),
|
||||
NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP0(vcvt_s16_v),
|
||||
NEONMAP0(vcvt_s32_v),
|
||||
NEONMAP0(vcvt_s64_v),
|
||||
NEONMAP0(vcvt_u16_v),
|
||||
NEONMAP0(vcvt_u32_v),
|
||||
NEONMAP0(vcvt_u64_v),
|
||||
NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
|
||||
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
|
||||
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
|
||||
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
|
||||
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
|
||||
NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
|
||||
NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
|
||||
NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
|
||||
NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
|
||||
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
|
||||
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
|
||||
NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
|
||||
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
|
||||
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
|
||||
NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
|
||||
NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
|
||||
NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
|
||||
NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
|
||||
NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
|
||||
NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
|
||||
NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
|
||||
NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
|
||||
NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
|
||||
NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
|
||||
NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
|
||||
NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
|
||||
NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
|
||||
NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
|
||||
NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
|
||||
NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
|
||||
NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
|
||||
NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
|
||||
NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
|
||||
NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
|
||||
NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
|
||||
NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
|
||||
NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
|
||||
NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
|
||||
NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
|
||||
NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
|
||||
NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
|
||||
NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
|
||||
NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
|
||||
NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
|
||||
NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
|
||||
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
|
||||
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
|
||||
NEONMAP0(vcvtq_f32_v),
|
||||
NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP0(vcvtq_s16_v),
|
||||
NEONMAP0(vcvtq_s32_v),
|
||||
NEONMAP0(vcvtq_s64_v),
|
||||
NEONMAP0(vcvtq_u16_v),
|
||||
NEONMAP0(vcvtq_u32_v),
|
||||
NEONMAP0(vcvtq_u64_v),
|
||||
NEONMAP0(vext_v),
|
||||
|
@ -3768,19 +3796,27 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
|
|||
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
|
||||
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
|
||||
NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
|
||||
NEONMAP0(vcvt_f16_v),
|
||||
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
|
||||
NEONMAP0(vcvt_f32_v),
|
||||
NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP0(vcvtq_f16_v),
|
||||
NEONMAP0(vcvtq_f32_v),
|
||||
NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
|
||||
NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
|
||||
NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
|
||||
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
|
||||
|
@ -4249,9 +4285,20 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
|
|||
case NEON::BI__builtin_neon_vcageq_v:
|
||||
case NEON::BI__builtin_neon_vcagt_v:
|
||||
case NEON::BI__builtin_neon_vcagtq_v: {
|
||||
llvm::Type *VecFlt = llvm::VectorType::get(
|
||||
VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
|
||||
VTy->getNumElements());
|
||||
llvm::Type *Ty;
|
||||
switch (VTy->getScalarSizeInBits()) {
|
||||
default: llvm_unreachable("unexpected type");
|
||||
case 32:
|
||||
Ty = FloatTy;
|
||||
break;
|
||||
case 64:
|
||||
Ty = DoubleTy;
|
||||
break;
|
||||
case 16:
|
||||
Ty = HalfTy;
|
||||
break;
|
||||
}
|
||||
llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
|
||||
llvm::Type *Tys[] = { VTy, VecFlt };
|
||||
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
|
||||
return EmitNeonCall(F, Ops, NameHint);
|
||||
|
@ -4268,8 +4315,16 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
|
|||
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
|
||||
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
|
||||
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
|
||||
case NEON::BI__builtin_neon_vcvt_f16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_f16_v:
|
||||
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
|
||||
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad));
|
||||
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
|
||||
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
|
||||
case NEON::BI__builtin_neon_vcvt_n_f16_v:
|
||||
case NEON::BI__builtin_neon_vcvt_n_f32_v:
|
||||
case NEON::BI__builtin_neon_vcvt_n_f64_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_f16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
|
||||
llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
|
||||
|
@ -4277,11 +4332,15 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
|
|||
Function *F = CGM.getIntrinsic(Int, Tys);
|
||||
return EmitNeonCall(F, Ops, "vcvt_n");
|
||||
}
|
||||
case NEON::BI__builtin_neon_vcvt_n_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvt_n_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvt_n_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvt_n_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvt_n_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvt_n_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
|
||||
|
@ -4293,44 +4352,63 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
|
|||
case NEON::BI__builtin_neon_vcvt_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvt_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvt_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvt_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvt_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_u64_v: {
|
||||
case NEON::BI__builtin_neon_vcvtq_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_u16_v: {
|
||||
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
|
||||
return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
|
||||
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
|
||||
}
|
||||
case NEON::BI__builtin_neon_vcvta_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvta_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvta_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvta_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvta_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
|
||||
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
|
||||
|
@ -6540,7 +6618,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
|
||||
return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
|
||||
}
|
||||
case NEON::BI__builtin_neon_vfmah_lane_f16:
|
||||
case NEON::BI__builtin_neon_vfmas_lane_f32:
|
||||
case NEON::BI__builtin_neon_vfmah_laneq_f16:
|
||||
case NEON::BI__builtin_neon_vfmas_laneq_f32:
|
||||
case NEON::BI__builtin_neon_vfmad_lane_f64:
|
||||
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
|
||||
|
@ -6715,18 +6795,25 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
case NEON::BI__builtin_neon_vcvt_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvt_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvt_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvt_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvt_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_u64_v: {
|
||||
case NEON::BI__builtin_neon_vcvtq_u64_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtq_u16_v: {
|
||||
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
|
||||
if (usgn)
|
||||
return Builder.CreateFPToUI(Ops[0], Ty);
|
||||
return Builder.CreateFPToSI(Ops[0], Ty);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vcvta_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvta_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvta_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvta_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtaq_s64_v:
|
||||
|
@ -6736,9 +6823,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
|
||||
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
|
||||
}
|
||||
case NEON::BI__builtin_neon_vcvtm_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtm_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtmq_s64_v:
|
||||
|
@ -6748,9 +6839,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
|
||||
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
|
||||
}
|
||||
case NEON::BI__builtin_neon_vcvtn_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtn_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtnq_s64_v:
|
||||
|
@ -6760,9 +6855,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
|
||||
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
|
||||
}
|
||||
case NEON::BI__builtin_neon_vcvtp_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_s16_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_s32_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_u16_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_u32_v:
|
||||
case NEON::BI__builtin_neon_vcvtp_s64_v:
|
||||
case NEON::BI__builtin_neon_vcvtpq_s64_v:
|
||||
|
@ -6935,6 +7034,24 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
|
||||
return Builder.CreateTrunc(Ops[0], Int16Ty);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vmaxv_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fmaxv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 4);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vmaxvq_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fmaxv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 8);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vminv_u8: {
|
||||
Int = Intrinsic::aarch64_neon_uminv;
|
||||
Ty = Int32Ty;
|
||||
|
@ -7007,6 +7124,60 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
|
||||
return Builder.CreateTrunc(Ops[0], Int16Ty);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vminv_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fminv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 4);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vminvq_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fminv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 8);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vmaxnmv_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fmaxnmv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 4);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vmaxnmvq_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fmaxnmv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 8);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vminnmv_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fminnmv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 4);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vminnmvq_f16: {
|
||||
Int = Intrinsic::aarch64_neon_fminnmv;
|
||||
Ty = HalfTy;
|
||||
VTy = llvm::VectorType::get(HalfTy, 8);
|
||||
llvm::Type *Tys[2] = { Ty, VTy };
|
||||
Ops.push_back(EmitScalarExpr(E->getArg(0)));
|
||||
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
|
||||
return Builder.CreateTrunc(Ops[0], HalfTy);
|
||||
}
|
||||
case NEON::BI__builtin_neon_vmul_n_f64: {
|
||||
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
|
||||
Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
|
||||
|
|
|
@@ -103,6 +103,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
  Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
  HalfTy = llvm::Type::getHalfTy(LLVMContext);
  FloatTy = llvm::Type::getFloatTy(LLVMContext);
  DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
  PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
@@ -37,7 +37,7 @@ struct CodeGenTypeCache {
  /// i8, i16, i32, and i64
  llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
  /// float, double
  llvm::Type *FloatTy, *DoubleTy;
  llvm::Type *HalfTy, *FloatTy, *DoubleTy;

  /// int
  llvm::IntegerType *IntTy;
@ -9037,10 +9037,9 @@ int64x2_t test_vld1q_s64(int64_t const *a) {
|
|||
|
||||
// CHECK-LABEL: @test_vld1q_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>*
|
||||
// CHECK: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* [[TMP1]]
|
||||
// CHECK: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to <8 x half>
|
||||
// CHECK: ret <8 x half> [[TMP3]]
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
|
||||
// CHECK: [[TMP2:%.*]] = load <8 x half>, <8 x half>* [[TMP1]]
|
||||
// CHECK: ret <8 x half> [[TMP2]]
|
||||
float16x8_t test_vld1q_f16(float16_t const *a) {
|
||||
return vld1q_f16(a);
|
||||
}
|
||||
|
@ -9152,10 +9151,9 @@ int64x1_t test_vld1_s64(int64_t const *a) {
|
|||
|
||||
// CHECK-LABEL: @test_vld1_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>*
|
||||
// CHECK: [[TMP2:%.*]] = load <4 x i16>, <4 x i16>* [[TMP1]]
|
||||
// CHECK: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP2]] to <4 x half>
|
||||
// CHECK: ret <4 x half> [[TMP3]]
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
|
||||
// CHECK: [[TMP2:%.*]] = load <4 x half>, <4 x half>* [[TMP1]]
|
||||
// CHECK: ret <4 x half> [[TMP2]]
|
||||
float16x4_t test_vld1_f16(float16_t const *a) {
|
||||
return vld1_f16(a);
|
||||
}
|
||||
|
@ -9342,10 +9340,10 @@ int64x2x2_t test_vld2q_s64(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <8 x i16>*
|
||||
// CHECK: [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0v8i16(<8 x i16>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16> } [[VLD2]], { <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <8 x half>*
|
||||
// CHECK: [[VLD2:%.*]] = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half> } [[VLD2]], { <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 16, i1 false)
|
||||
|
@ -9573,10 +9571,10 @@ int64x1x2_t test_vld2_s64(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x i16>*
|
||||
// CHECK: [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0v4i16(<4 x i16>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16> } [[VLD2]], { <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x half>*
|
||||
// CHECK: [[VLD2:%.*]] = call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half> } [[VLD2]], { <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 16, i32 8, i1 false)
|
||||
|
@ -9804,10 +9802,10 @@ int64x2x3_t test_vld3q_s64(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <8 x i16>*
|
||||
// CHECK: [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0v8i16(<8 x i16>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], { <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <8 x half>*
|
||||
// CHECK: [[VLD3:%.*]] = call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half> } [[VLD3]], { <8 x half>, <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 48, i32 16, i1 false)
|
||||
|
@ -10035,10 +10033,10 @@ int64x1x3_t test_vld3_s64(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x i16>*
|
||||
// CHECK: [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0v4i16(<4 x i16>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], { <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x half>*
|
||||
// CHECK: [[VLD3:%.*]] = call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half> } [[VLD3]], { <4 x half>, <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 24, i32 8, i1 false)
|
||||
|
@ -10266,10 +10264,10 @@ int64x2x4_t test_vld4q_s64(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <8 x i16>*
|
||||
// CHECK: [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0v8i16(<8 x i16>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <8 x half>*
|
||||
// CHECK: [[VLD4:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 x half>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half>, <8 x half> } [[VLD4]], { <8 x half>, <8 x half>, <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 64, i32 16, i1 false)
|
||||
|
@ -10497,10 +10495,10 @@ int64x1x4_t test_vld4_s64(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x i16>*
|
||||
// CHECK: [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0v4i16(<4 x i16>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x half>*
|
||||
// CHECK: [[VLD4:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0v4f16(<4 x half>* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half>, <4 x half> } [[VLD4]], { <4 x half>, <4 x half>, <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 8, i1 false)
|
||||
|
@ -10666,9 +10664,9 @@ void test_vst1q_s64(int64_t *a, int64x2_t b) {
|
|||
// CHECK-LABEL: @test_vst1q_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>*
|
||||
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
|
||||
// CHECK: store <8 x i16> [[TMP3]], <8 x i16>* [[TMP2]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
|
||||
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
|
||||
// CHECK: store <8 x half> [[TMP3]], <8 x half>* [[TMP2]]
|
||||
// CHECK: ret void
|
||||
void test_vst1q_f16(float16_t *a, float16x8_t b) {
|
||||
vst1q_f16(a, b);
|
||||
|
@ -10800,9 +10798,9 @@ void test_vst1_s64(int64_t *a, int64x1_t b) {
|
|||
// CHECK-LABEL: @test_vst1_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>*
|
||||
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
|
||||
// CHECK: store <4 x i16> [[TMP3]], <4 x i16>* [[TMP2]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
|
||||
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
|
||||
// CHECK: store <4 x half> [[TMP3]], <4 x half>* [[TMP2]]
|
||||
// CHECK: ret void
|
||||
void test_vst1_f16(float16_t *a, float16x4_t b) {
|
||||
vst1_f16(a, b);
|
||||
|
@ -11056,9 +11054,9 @@ void test_vst2q_s64(int64_t *a, int64x2x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
|
||||
// CHECK: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8>
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2.v8i16.p0i8(<8 x i16> [[TMP7]], <8 x i16> [[TMP8]], i8* [[TMP2]])
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2.v8f16.p0i8(<8 x half> [[TMP7]], <8 x half> [[TMP8]], i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst2q_f16(float16_t *a, float16x8x2_t b) {
|
||||
vst2q_f16(a, b);
|
||||
|
@ -11366,9 +11364,9 @@ void test_vst2_s64(int64_t *a, int64x1x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP5:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
|
||||
// CHECK: [[TMP6:%.*]] = bitcast <4 x half> [[TMP5]] to <8 x i8>
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2.v4i16.p0i8(<4 x i16> [[TMP7]], <4 x i16> [[TMP8]], i8* [[TMP2]])
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2.v4f16.p0i8(<4 x half> [[TMP7]], <4 x half> [[TMP8]], i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst2_f16(float16_t *a, float16x4x2_t b) {
|
||||
vst2_f16(a, b);
|
||||
|
@ -11716,10 +11714,10 @@ void test_vst3q_s64(int64_t *a, int64x2x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3.v8i16.p0i8(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], i8* [[TMP2]])
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3.v8f16.p0i8(<8 x half> [[TMP9]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst3q_f16(float16_t *a, float16x8x3_t b) {
|
||||
vst3q_f16(a, b);
|
||||
|
@ -12085,10 +12083,10 @@ void test_vst3_s64(int64_t *a, int64x1x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP7:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <4 x half> [[TMP7]] to <8 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3.v4i16.p0i8(<4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], i8* [[TMP2]])
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3.v4f16.p0i8(<4 x half> [[TMP9]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst3_f16(float16_t *a, float16x4x3_t b) {
|
||||
vst3_f16(a, b);
|
||||
|
@ -12494,11 +12492,11 @@ void test_vst4q_s64(int64_t *a, int64x2x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4.v8i16.p0i8(<8 x i16> [[TMP11]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], i8* [[TMP2]])
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4.v8f16.p0i8(<8 x half> [[TMP11]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst4q_f16(float16_t *a, float16x8x4_t b) {
|
||||
vst4q_f16(a, b);
|
||||
|
@ -12922,11 +12920,11 @@ void test_vst4_s64(int64_t *a, int64x1x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP9:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <4 x half> [[TMP9]] to <8 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4.v4i16.p0i8(<4 x i16> [[TMP11]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], i8* [[TMP2]])
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4.v4f16.p0i8(<4 x half> [[TMP11]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst4_f16(float16_t *a, float16x4x4_t b) {
|
||||
vst4_f16(a, b);
|
||||
|
@ -13208,10 +13206,10 @@ int64x2x2_t test_vld1q_s64_x2(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16> } [[VLD1XN]], { <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half> } [[VLD1XN]], { <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 16, i1 false)
|
||||
|
@ -13454,10 +13452,10 @@ int64x1x2_t test_vld1_s64_x2(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16> } [[VLD1XN]], { <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half> } [[VLD1XN]], { <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 16, i32 8, i1 false)
|
||||
|
@ -13700,10 +13698,10 @@ int64x2x3_t test_vld1q_s64_x3(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD1XN]], { <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half> } [[VLD1XN]], { <8 x half>, <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 48, i32 16, i1 false)
|
||||
|
@ -13946,10 +13944,10 @@ int64x1x3_t test_vld1_s64_x3(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD1XN]], { <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half> } [[VLD1XN]], { <4 x half>, <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 24, i32 8, i1 false)
|
||||
|
@ -14192,10 +14190,10 @@ int64x2x4_t test_vld1q_s64_x4(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD1XN]], { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half>, <8 x half> } [[VLD1XN]], { <8 x half>, <8 x half>, <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 64, i32 16, i1 false)
|
||||
|
@ -14438,10 +14436,10 @@ int64x1x4_t test_vld1_s64_x4(int64_t const *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD1XN]], { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD1XN:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half>, <4 x half> } [[VLD1XN]], { <4 x half>, <4 x half>, <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 8, i1 false)
|
||||
|
@ -14752,10 +14750,10 @@ void test_vst1q_s64_x2(int64_t *a, int64x2x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
|
||||
// CHECK: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8>
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast i8* [[TMP2]] to i16*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> [[TMP7]], <8 x i16> [[TMP8]], i16* [[TMP9]])
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast i8* [[TMP2]] to half*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x2.v8f16.p0f16(<8 x half> [[TMP7]], <8 x half> [[TMP8]], half* [[TMP9]])
|
||||
// CHECK: ret void
|
||||
void test_vst1q_f16_x2(float16_t *a, float16x8x2_t b) {
|
||||
vst1q_f16_x2(a, b);
|
||||
|
@ -15098,10 +15096,10 @@ void test_vst1_s64_x2(int64_t *a, int64x1x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP5:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
|
||||
// CHECK: [[TMP6:%.*]] = bitcast <4 x half> [[TMP5]] to <8 x i8>
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast i8* [[TMP2]] to i16*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> [[TMP7]], <4 x i16> [[TMP8]], i16* [[TMP9]])
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast i8* [[TMP2]] to half*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x2.v4f16.p0f16(<4 x half> [[TMP7]], <4 x half> [[TMP8]], half* [[TMP9]])
|
||||
// CHECK: ret void
|
||||
void test_vst1_f16_x2(float16_t *a, float16x4x2_t b) {
|
||||
vst1_f16_x2(a, b);
|
||||
|
@ -15484,11 +15482,11 @@ void test_vst1q_s64_x3(int64_t *a, int64x2x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast i8* [[TMP2]] to i16*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], i16* [[TMP12]])
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast i8* [[TMP2]] to half*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x3.v8f16.p0f16(<8 x half> [[TMP9]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], half* [[TMP12]])
|
||||
// CHECK: ret void
|
||||
void test_vst1q_f16_x3(float16_t *a, float16x8x3_t b) {
|
||||
vst1q_f16_x3(a, b);
|
||||
|
@ -15894,11 +15892,11 @@ void test_vst1_s64_x3(int64_t *a, int64x1x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP7:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <4 x half> [[TMP7]] to <8 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast i8* [[TMP2]] to i16*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], i16* [[TMP12]])
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast i8* [[TMP2]] to half*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x3.v4f16.p0f16(<4 x half> [[TMP9]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], half* [[TMP12]])
|
||||
// CHECK: ret void
|
||||
void test_vst1_f16_x3(float16_t *a, float16x4x3_t b) {
|
||||
vst1_f16_x3(a, b);
|
||||
|
@ -16344,12 +16342,12 @@ void test_vst1q_s64_x4(int64_t *a, int64x2x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast i8* [[TMP2]] to i16*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> [[TMP11]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], i16* [[TMP15]])
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast i8* [[TMP2]] to half*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x4.v8f16.p0f16(<8 x half> [[TMP11]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], half* [[TMP15]])
|
||||
// CHECK: ret void
|
||||
void test_vst1q_f16_x4(float16_t *a, float16x8x4_t b) {
|
||||
vst1q_f16_x4(a, b);
|
||||
|
@ -16818,12 +16816,12 @@ void test_vst1_s64_x4(int64_t *a, int64x1x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP9:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <4 x half> [[TMP9]] to <8 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast i8* [[TMP2]] to i16*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> [[TMP11]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], i16* [[TMP15]])
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast i8* [[TMP2]] to half*
|
||||
// CHECK: call void @llvm.aarch64.neon.st1x4.v4f16.p0f16(<4 x half> [[TMP11]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], half* [[TMP15]])
|
||||
// CHECK: ret void
|
||||
void test_vst1_f16_x4(float16_t *a, float16x4x4_t b) {
|
||||
vst1_f16_x4(a, b);
|
||||
|
|
|
@ -90,12 +90,11 @@ int64x2_t test_vld1q_dup_s64(int64_t *a) {
|
|||
|
||||
// CHECK-LABEL: define <8 x half> @test_vld1q_dup_f16(half* %a) #0 {
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <8 x i16> undef, i16 [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> [[TMP3]], <8 x i32> zeroinitializer
|
||||
// CHECK: [[TMP4:%.*]] = bitcast <8 x i16> [[LANE]] to <8 x half>
|
||||
// CHECK: ret <8 x half> [[TMP4]]
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]]
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> undef, half [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP3]], <8 x half> [[TMP3]], <8 x i32> zeroinitializer
|
||||
// CHECK: ret <8 x half> [[LANE]]
|
||||
float16x8_t test_vld1q_dup_f16(float16_t *a) {
|
||||
return vld1q_dup_f16(a);
|
||||
}
|
||||
|
@ -239,12 +238,11 @@ int64x1_t test_vld1_dup_s64(int64_t *a) {
|
|||
|
||||
// CHECK-LABEL: define <4 x half> @test_vld1_dup_f16(half* %a) #0 {
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <4 x i16> [[TMP3]], <4 x i16> [[TMP3]], <4 x i32> zeroinitializer
|
||||
// CHECK: [[TMP4:%.*]] = bitcast <4 x i16> [[LANE]] to <4 x half>
|
||||
// CHECK: ret <4 x half> [[TMP4]]
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]]
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> undef, half [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> zeroinitializer
|
||||
// CHECK: ret <4 x half> [[LANE]]
|
||||
float16x4_t test_vld1_dup_f16(float16_t *a) {
|
||||
return vld1_dup_f16(a);
|
||||
}
|
||||
|
@ -447,10 +445,10 @@ int64x2x2_t test_vld2q_dup_s64(int64_t *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16> } [[VLD2]], { <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD2:%.*]] = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half> } [[VLD2]], { <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 16, i1 false)
|
||||
|
@ -693,10 +691,10 @@ int64x1x2_t test_vld2_dup_s64(int64_t *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16> } [[VLD2]], { <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD2:%.*]] = call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half> } [[VLD2]], { <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 16, i32 8, i1 false)
|
||||
|
@ -947,10 +945,10 @@ int64x2x3_t test_vld3q_dup_s64(int64_t *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], { <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD3:%.*]] = call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half> } [[VLD3]], { <8 x half>, <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 48, i32 16, i1 false)
|
||||
|
@ -1207,10 +1205,10 @@ int64x1x3_t test_vld3_dup_s64(int64_t *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], { <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD3:%.*]] = call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half> } [[VLD3]], { <4 x half>, <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 24, i32 8, i1 false)
|
||||
|
@ -1459,10 +1457,10 @@ int64x2x4_t test_vld4q_dup_s64(int64_t *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD4:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <8 x half>, <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half>, <8 x half> } [[VLD4]], { <8 x half>, <8 x half>, <8 x half>, <8 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x8x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 64, i32 16, i1 false)
|
||||
|
@ -1705,10 +1703,10 @@ int64x1x4_t test_vld4_dup_s64(int64_t *a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i16*
|
||||
// CHECK: [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP3]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to half*
|
||||
// CHECK: [[VLD4:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0f16(half* [[TMP2]])
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to { <4 x half>, <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half>, <4 x half> } [[VLD4]], { <4 x half>, <4 x half>, <4 x half>, <4 x half> }* [[TMP3]]
|
||||
// CHECK: [[TMP4:%.*]] = bitcast %struct.float16x4x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP5:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP4]], i8* [[TMP5]], i64 32, i32 8, i1 false)
|
||||
|
@ -1897,12 +1895,11 @@ int64x2_t test_vld1q_lane_s64(int64_t *a, int64x2_t b) {
|
|||
// CHECK-LABEL: define <8 x half> @test_vld1q_lane_f16(half* %a, <8 x half> %b) #0 {
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP4:%.*]] = load i16, i16* [[TMP3]]
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[TMP4]], i32 7
|
||||
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[VLD1_LANE]] to <8 x half>
|
||||
// CHECK: ret <8 x half> [[TMP5]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP4:%.*]] = load half, half* [[TMP3]]
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x half> [[TMP2]], half [[TMP4]], i32 7
|
||||
// CHECK: ret <8 x half> [[VLD1_LANE]]
|
||||
float16x8_t test_vld1q_lane_f16(float16_t *a, float16x8_t b) {
|
||||
return vld1q_lane_f16(a, b, 7);
|
||||
}
|
||||
|
@ -2054,12 +2051,11 @@ int64x1_t test_vld1_lane_s64(int64_t *a, int64x1_t b) {
|
|||
// CHECK-LABEL: define <4 x half> @test_vld1_lane_f16(half* %a, <4 x half> %b) #0 {
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP4:%.*]] = load i16, i16* [[TMP3]]
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[TMP4]], i32 3
|
||||
// CHECK: [[TMP5:%.*]] = bitcast <4 x i16> [[VLD1_LANE]] to <4 x half>
|
||||
// CHECK: ret <4 x half> [[TMP5]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP4:%.*]] = load half, half* [[TMP3]]
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <4 x half> [[TMP2]], half [[TMP4]], i32 3
|
||||
// CHECK: ret <4 x half> [[VLD1_LANE]]
|
||||
float16x4_t test_vld1_lane_f16(float16_t *a, float16x4_t b) {
|
||||
return vld1_lane_f16(a, b, 3);
|
||||
}
|
||||
|
@ -2495,11 +2491,11 @@ int64x2x2_t test_vld2q_lane_s64(int64_t *a, int64x2x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
|
||||
// CHECK: [[VLD2_LANE:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i8(<8 x i16> [[TMP8]], <8 x i16> [[TMP9]], i64 7, i8* [[TMP3]])
|
||||
// CHECK: [[TMP10:%.*]] = bitcast i8* [[TMP2]] to { <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16> } [[VLD2_LANE]], { <8 x i16>, <8 x i16> }* [[TMP10]]
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
|
||||
// CHECK: [[VLD2_LANE:%.*]] = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0i8(<8 x half> [[TMP8]], <8 x half> [[TMP9]], i64 7, i8* [[TMP3]])
|
||||
// CHECK: [[TMP10:%.*]] = bitcast i8* [[TMP2]] to { <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half> } [[VLD2_LANE]], { <8 x half>, <8 x half> }* [[TMP10]]
|
||||
// CHECK: [[TMP11:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP12:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP11]], i8* [[TMP12]], i64 32, i32 16, i1 false)
|
||||
|
@ -2927,11 +2923,11 @@ int64x1x2_t test_vld2_lane_s64(int64_t *a, int64x1x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP6:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <4 x half> [[TMP6]] to <8 x i8>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
|
||||
// CHECK: [[VLD2_LANE:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i8(<4 x i16> [[TMP8]], <4 x i16> [[TMP9]], i64 3, i8* [[TMP3]])
|
||||
// CHECK: [[TMP10:%.*]] = bitcast i8* [[TMP2]] to { <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16> } [[VLD2_LANE]], { <4 x i16>, <4 x i16> }* [[TMP10]]
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
|
||||
// CHECK: [[VLD2_LANE:%.*]] = call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0i8(<4 x half> [[TMP8]], <4 x half> [[TMP9]], i64 3, i8* [[TMP3]])
|
||||
// CHECK: [[TMP10:%.*]] = bitcast i8* [[TMP2]] to { <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half> } [[VLD2_LANE]], { <4 x half>, <4 x half> }* [[TMP10]]
|
||||
// CHECK: [[TMP11:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP12:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP11]], i8* [[TMP12]], i64 16, i32 8, i1 false)
|
||||
|
@ -3364,12 +3360,12 @@ int64x2x3_t test_vld3q_lane_s64(int64_t *a, int64x2x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
|
||||
// CHECK: [[VLD3_LANE:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i8(<8 x i16> [[TMP10]], <8 x i16> [[TMP11]], <8 x i16> [[TMP12]], i64 7, i8* [[TMP3]])
|
||||
// CHECK: [[TMP13:%.*]] = bitcast i8* [[TMP2]] to { <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3_LANE]], { <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP13]]
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
|
||||
// CHECK: [[VLD3_LANE:%.*]] = call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0i8(<8 x half> [[TMP10]], <8 x half> [[TMP11]], <8 x half> [[TMP12]], i64 7, i8* [[TMP3]])
|
||||
// CHECK: [[TMP13:%.*]] = bitcast i8* [[TMP2]] to { <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half> } [[VLD3_LANE]], { <8 x half>, <8 x half>, <8 x half> }* [[TMP13]]
|
||||
// CHECK: [[TMP14:%.*]] = bitcast %struct.float16x8x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP15:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP14]], i8* [[TMP15]], i64 48, i32 16, i1 false)
|
||||
|
@ -3889,12 +3885,12 @@ int64x1x3_t test_vld3_lane_s64(int64_t *a, int64x1x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP8:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <4 x half> [[TMP8]] to <8 x i8>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
|
||||
// CHECK: [[VLD3_LANE:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i8(<4 x i16> [[TMP10]], <4 x i16> [[TMP11]], <4 x i16> [[TMP12]], i64 3, i8* [[TMP3]])
|
||||
// CHECK: [[TMP13:%.*]] = bitcast i8* [[TMP2]] to { <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3_LANE]], { <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP13]]
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
|
||||
// CHECK: [[VLD3_LANE:%.*]] = call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0i8(<4 x half> [[TMP10]], <4 x half> [[TMP11]], <4 x half> [[TMP12]], i64 3, i8* [[TMP3]])
|
||||
// CHECK: [[TMP13:%.*]] = bitcast i8* [[TMP2]] to { <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half> } [[VLD3_LANE]], { <4 x half>, <4 x half>, <4 x half> }* [[TMP13]]
|
||||
// CHECK: [[TMP14:%.*]] = bitcast %struct.float16x4x3_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP15:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP14]], i8* [[TMP15]], i64 24, i32 8, i1 false)
|
||||
|
@ -4454,13 +4450,13 @@ int64x2x4_t test_vld4q_lane_s64(int64_t *a, int64x2x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
|
||||
// CHECK: [[VLD4_LANE:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i8(<8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], <8 x i16> [[TMP15]], i64 7, i8* [[TMP3]])
|
||||
// CHECK: [[TMP16:%.*]] = bitcast i8* [[TMP2]] to { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }*
|
||||
// CHECK: store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4_LANE]], { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }* [[TMP16]]
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
|
||||
// CHECK: [[VLD4_LANE:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0i8(<8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], <8 x half> [[TMP15]], i64 7, i8* [[TMP3]])
|
||||
// CHECK: [[TMP16:%.*]] = bitcast i8* [[TMP2]] to { <8 x half>, <8 x half>, <8 x half>, <8 x half> }*
|
||||
// CHECK: store { <8 x half>, <8 x half>, <8 x half>, <8 x half> } [[VLD4_LANE]], { <8 x half>, <8 x half>, <8 x half>, <8 x half> }* [[TMP16]]
|
||||
// CHECK: [[TMP17:%.*]] = bitcast %struct.float16x8x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP18:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP17]], i8* [[TMP18]], i64 64, i32 16, i1 false)
|
||||
|
@ -5043,13 +5039,13 @@ int64x1x4_t test_vld4_lane_s64(int64_t *a, int64x1x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP10:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
|
||||
// CHECK: [[VLD4_LANE:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i8(<4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], <4 x i16> [[TMP15]], i64 3, i8* [[TMP3]])
|
||||
// CHECK: [[TMP16:%.*]] = bitcast i8* [[TMP2]] to { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }*
|
||||
// CHECK: store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4_LANE]], { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }* [[TMP16]]
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
|
||||
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
|
||||
// CHECK: [[VLD4_LANE:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0i8(<4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], <4 x half> [[TMP15]], i64 3, i8* [[TMP3]])
|
||||
// CHECK: [[TMP16:%.*]] = bitcast i8* [[TMP2]] to { <4 x half>, <4 x half>, <4 x half>, <4 x half> }*
|
||||
// CHECK: store { <4 x half>, <4 x half>, <4 x half>, <4 x half> } [[VLD4_LANE]], { <4 x half>, <4 x half>, <4 x half>, <4 x half> }* [[TMP16]]
|
||||
// CHECK: [[TMP17:%.*]] = bitcast %struct.float16x4x4_t* [[RETVAL]] to i8*
|
||||
// CHECK: [[TMP18:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[TMP17]], i8* [[TMP18]], i64 32, i32 8, i1 false)
|
||||
|
@ -5361,10 +5357,10 @@ void test_vst1q_lane_s64(int64_t *a, int64x2_t b) {
|
|||
// CHECK-LABEL: define void @test_vst1q_lane_f16(half* %a, <8 x half> %b) #0 {
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
|
||||
// CHECK: [[TMP3:%.*]] = extractelement <8 x i16> [[TMP2]], i32 7
|
||||
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: store i16 [[TMP3]], i16* [[TMP4]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
|
||||
// CHECK: [[TMP3:%.*]] = extractelement <8 x half> [[TMP2]], i32 7
|
||||
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: store half [[TMP3]], half* [[TMP4]]
|
||||
// CHECK: ret void
|
||||
void test_vst1q_lane_f16(float16_t *a, float16x8_t b) {
|
||||
vst1q_lane_f16(a, b, 7);
|
||||
|
@ -5517,10 +5513,10 @@ void test_vst1_lane_s64(int64_t *a, int64x1_t b) {
|
|||
// CHECK-LABEL: define void @test_vst1_lane_f16(half* %a, <4 x half> %b) #0 {
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
|
||||
// CHECK: [[TMP3:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
|
||||
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: store i16 [[TMP3]], i16* [[TMP4]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
|
||||
// CHECK: [[TMP3:%.*]] = extractelement <4 x half> [[TMP2]], i32 3
|
||||
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: store half [[TMP3]], half* [[TMP4]]
|
||||
// CHECK: ret void
|
||||
void test_vst1_lane_f16(float16_t *a, float16x4_t b) {
|
||||
vst1_lane_f16(a, b, 3);
|
||||
|
@ -5789,9 +5785,9 @@ void test_vst2q_lane_s64(int64_t *a, int64x2x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP5:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
|
||||
// CHECK: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8>
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2lane.v8i16.p0i8(<8 x i16> [[TMP7]], <8 x i16> [[TMP8]], i64 7, i8* [[TMP2]])
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2lane.v8f16.p0i8(<8 x half> [[TMP7]], <8 x half> [[TMP8]], i64 7, i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst2q_lane_f16(float16_t *a, float16x8x2_t b) {
|
||||
vst2q_lane_f16(a, b, 7);
|
||||
|
@ -6124,9 +6120,9 @@ void test_vst2_lane_s64(int64_t *a, int64x1x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i64 0, i64 1
|
||||
// CHECK: [[TMP5:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
|
||||
// CHECK: [[TMP6:%.*]] = bitcast <4 x half> [[TMP5]] to <8 x i8>
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2lane.v4i16.p0i8(<4 x i16> [[TMP7]], <4 x i16> [[TMP8]], i64 3, i8* [[TMP2]])
|
||||
// CHECK: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st2lane.v4f16.p0i8(<4 x half> [[TMP7]], <4 x half> [[TMP8]], i64 3, i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst2_lane_f16(float16_t *a, float16x4x2_t b) {
|
||||
vst2_lane_f16(a, b, 3);
|
||||
|
@ -6499,10 +6495,10 @@ void test_vst3q_lane_s64(int64_t *a, int64x2x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3lane.v8i16.p0i8(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], i64 7, i8* [[TMP2]])
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3lane.v8f16.p0i8(<8 x half> [[TMP9]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], i64 7, i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst3q_lane_f16(float16_t *a, float16x8x3_t b) {
|
||||
vst3q_lane_f16(a, b, 7);
|
||||
|
@ -6898,10 +6894,10 @@ void test_vst3_lane_s64(int64_t *a, int64x1x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i64 0, i64 2
|
||||
// CHECK: [[TMP7:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <4 x half> [[TMP7]] to <8 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3lane.v4i16.p0i8(<4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], i64 3, i8* [[TMP2]])
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st3lane.v4f16.p0i8(<4 x half> [[TMP9]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], i64 3, i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst3_lane_f16(float16_t *a, float16x4x3_t b) {
|
||||
vst3_lane_f16(a, b, 3);
|
||||
|
@ -7337,11 +7333,11 @@ void test_vst4q_lane_s64(int64_t *a, int64x2x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4lane.v8i16.p0i8(<8 x i16> [[TMP11]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], i64 7, i8* [[TMP2]])
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4lane.v8f16.p0i8(<8 x half> [[TMP11]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], i64 7, i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst4q_lane_f16(float16_t *a, float16x8x4_t b) {
|
||||
vst4q_lane_f16(a, b, 7);
|
||||
|
@ -7800,11 +7796,11 @@ void test_vst4_lane_s64(int64_t *a, int64x1x4_t b) {
|
|||
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i64 0, i64 3
|
||||
// CHECK: [[TMP9:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <4 x half> [[TMP9]] to <8 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4lane.v4i16.p0i8(<4 x i16> [[TMP11]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], i64 3, i8* [[TMP2]])
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half>
|
||||
// CHECK: call void @llvm.aarch64.neon.st4lane.v4f16.p0i8(<4 x half> [[TMP11]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], i64 3, i8* [[TMP2]])
|
||||
// CHECK: ret void
|
||||
void test_vst4_lane_f16(float16_t *a, float16x4x4_t b) {
|
||||
vst4_lane_f16(a, b, 3);
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -3896,9 +3896,8 @@ int64x2_t test_vld1q_s64(int64_t const * a) {
|
|||
|
||||
// CHECK-LABEL: @test_vld1q_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD1:%.*]] = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* [[TMP0]], i32 2)
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> [[VLD1]] to <8 x half>
|
||||
// CHECK: ret <8 x half> [[TMP1]]
|
||||
// CHECK: [[VLD1:%.*]] = call <8 x half> @llvm.arm.neon.vld1.v8f16.p0i8(i8* [[TMP0]], i32 2)
|
||||
// CHECK: ret <8 x half> [[VLD1]]
|
||||
float16x8_t test_vld1q_f16(float16_t const * a) {
|
||||
return vld1q_f16(a);
|
||||
}
|
||||
|
@ -3990,9 +3989,8 @@ int64x1_t test_vld1_s64(int64_t const * a) {
|
|||
|
||||
// CHECK-LABEL: @test_vld1_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD1:%.*]] = call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8* [[TMP0]], i32 2)
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> [[VLD1]] to <4 x half>
|
||||
// CHECK: ret <4 x half> [[TMP1]]
|
||||
// CHECK: [[VLD1:%.*]] = call <4 x half> @llvm.arm.neon.vld1.v4f16.p0i8(i8* [[TMP0]], i32 2)
|
||||
// CHECK: ret <4 x half> [[VLD1]]
|
||||
float16x4_t test_vld1_f16(float16_t const * a) {
|
||||
return vld1_f16(a);
|
||||
}
|
||||
|
@ -4106,12 +4104,11 @@ int64x2_t test_vld1q_dup_s64(int64_t const * a) {
|
|||
|
||||
// CHECK-LABEL: @test_vld1q_dup_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <8 x i16> undef, i16 [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> [[TMP3]], <8 x i32> zeroinitializer
|
||||
// CHECK: [[TMP4:%.*]] = bitcast <8 x i16> [[LANE]] to <8 x half>
|
||||
// CHECK: ret <8 x half> [[TMP4]]
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]], align 2
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> undef, half [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP3]], <8 x half> [[TMP3]], <8 x i32> zeroinitializer
|
||||
// CHECK: ret <8 x half> [[LANE]]
|
||||
float16x8_t test_vld1q_dup_f16(float16_t const * a) {
|
||||
return vld1q_dup_f16(a);
|
||||
}
|
||||
|
@ -4233,12 +4230,11 @@ int64x1_t test_vld1_dup_s64(int64_t const * a) {
|
|||
|
||||
// CHECK-LABEL: @test_vld1_dup_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <4 x i16> [[TMP3]], <4 x i16> [[TMP3]], <4 x i32> zeroinitializer
|
||||
// CHECK: [[TMP4:%.*]] = bitcast <4 x i16> [[LANE]] to <4 x half>
|
||||
// CHECK: ret <4 x half> [[TMP4]]
|
||||
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]], align 2
|
||||
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> undef, half [[TMP2]], i32 0
|
||||
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> zeroinitializer
|
||||
// CHECK: ret <4 x half> [[LANE]]
|
||||
float16x4_t test_vld1_dup_f16(float16_t const * a) {
|
||||
return vld1_dup_f16(a);
|
||||
}
|
||||
|
@ -4365,12 +4361,11 @@ int64x2_t test_vld1q_lane_s64(int64_t const * a, int64x2_t b) {
|
|||
// CHECK-LABEL: @test_vld1q_lane_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP4:%.*]] = load i16, i16* [[TMP3]], align 2
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[TMP4]], i32 7
|
||||
// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[VLD1_LANE]] to <8 x half>
|
||||
// CHECK: ret <8 x half> [[TMP5]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP4:%.*]] = load half, half* [[TMP3]], align 2
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x half> [[TMP2]], half [[TMP4]], i32 7
|
||||
// CHECK: ret <8 x half> [[VLD1_LANE]]
|
||||
float16x8_t test_vld1q_lane_f16(float16_t const * a, float16x8_t b) {
|
||||
return vld1q_lane_f16(a, b, 7);
|
||||
}
|
||||
|
@ -4498,12 +4493,11 @@ int64x1_t test_vld1_lane_s64(int64_t const * a, int64x1_t b) {
|
|||
// CHECK-LABEL: @test_vld1_lane_f16(
|
||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i16*
|
||||
// CHECK: [[TMP4:%.*]] = load i16, i16* [[TMP3]], align 2
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[TMP4]], i32 3
|
||||
// CHECK: [[TMP5:%.*]] = bitcast <4 x i16> [[VLD1_LANE]] to <4 x half>
|
||||
// CHECK: ret <4 x half> [[TMP5]]
|
||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
|
||||
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to half*
|
||||
// CHECK: [[TMP4:%.*]] = load half, half* [[TMP3]], align 2
|
||||
// CHECK: [[VLD1_LANE:%.*]] = insertelement <4 x half> [[TMP2]], half [[TMP4]], i32 3
|
||||
// CHECK: ret <4 x half> [[VLD1_LANE]]
|
||||
float16x4_t test_vld1_lane_f16(float16_t const * a, float16x4_t b) {
|
||||
return vld1_lane_f16(a, b, 3);
|
||||
}
|
||||
|
@ -4596,7 +4590,7 @@ int32x4x2_t test_vld2q_s32(int32_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD2Q_V:%.*]] = call { <8 x i16>, <8 x i16>
|
||||
// CHECK: [[VLD2Q_V:%.*]] = call { <8 x half>, <8 x half>
|
||||
float16x8x2_t test_vld2q_f16(float16_t const * a) {
|
||||
return vld2q_f16(a);
|
||||
}
|
||||
|
@ -4701,7 +4695,7 @@ int64x1x2_t test_vld2_s64(int64_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD2_V:%.*]] = call { <4 x i16>, <4 x i16>
|
||||
// CHECK: [[VLD2_V:%.*]] = call { <4 x half>, <4 x half>
|
||||
float16x4x2_t test_vld2_f16(float16_t const * a) {
|
||||
return vld2_f16(a);
|
||||
}
|
||||
|
@ -4806,7 +4800,7 @@ int64x1x2_t test_vld2_dup_s64(int64_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
|
||||
// CHECK: [[VLD_DUP:%.*]] = call { <4 x half>, <4 x half>
|
||||
float16x4x2_t test_vld2_dup_f16(float16_t const * a) {
|
||||
return vld2_dup_f16(a);
|
||||
}
|
||||
|
@ -4965,9 +4959,9 @@ int32x4x2_t test_vld2q_lane_s32(int32_t const * a, int32x4x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
|
||||
// CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: [[VLD2Q_LANE_V:%.*]] = call { <8 x i16>, <8 x i16>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: [[VLD2Q_LANE_V:%.*]] = call { <8 x half>, <8 x half>
|
||||
float16x8x2_t test_vld2q_lane_f16(float16_t const * a, float16x8x2_t b) {
|
||||
return vld2q_lane_f16(a, b, 7);
|
||||
}
|
||||
|
@ -5198,9 +5192,9 @@ int32x2x2_t test_vld2_lane_s32(int32_t const * a, int32x2x2_t b) {
|
|||
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i32 0, i32 1
|
||||
// CHECK: [[TMP7:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
|
||||
// CHECK: [[TMP8:%.*]] = bitcast <4 x half> [[TMP7]] to <8 x i8>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: [[VLD2_LANE_V:%.*]] = call { <4 x i16>, <4 x i16>
|
||||
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: [[VLD2_LANE_V:%.*]] = call { <4 x half>, <4 x half>
|
||||
float16x4x2_t test_vld2_lane_f16(float16_t const * a, float16x4x2_t b) {
|
||||
return vld2_lane_f16(a, b, 3);
|
||||
}
|
||||
|
@ -5337,7 +5331,7 @@ int32x4x3_t test_vld3q_s32(int32_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD3Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
|
||||
// CHECK: [[VLD3Q_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>
|
||||
float16x8x3_t test_vld3q_f16(float16_t const * a) {
|
||||
return vld3q_f16(a);
|
||||
}
|
||||
|
@ -5442,7 +5436,7 @@ int64x1x3_t test_vld3_s64(int64_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD3_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
|
||||
// CHECK: [[VLD3_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>
|
||||
float16x4x3_t test_vld3_f16(float16_t const * a) {
|
||||
return vld3_f16(a);
|
||||
}
|
||||
|
@ -5547,7 +5541,7 @@ int64x1x3_t test_vld3_dup_s64(int64_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
|
||||
// CHECK: [[VLD_DUP:%.*]] = call { <4 x half>, <4 x half>, <4 x half>
|
||||
float16x4x3_t test_vld3_dup_f16(float16_t const * a) {
|
||||
return vld3_dup_f16(a);
|
||||
}
|
||||
|
@ -5730,10 +5724,10 @@ int32x4x3_t test_vld3q_lane_s32(int32_t const * a, int32x4x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
|
||||
// CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16>
|
||||
// CHECK: [[VLD3Q_LANE_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half>
|
||||
// CHECK: [[VLD3Q_LANE_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>
|
||||
float16x8x3_t test_vld3q_lane_f16(float16_t const * a, float16x8x3_t b) {
|
||||
return vld3q_lane_f16(a, b, 7);
|
||||
}
|
||||
|
@ -6004,10 +5998,10 @@ int32x2x3_t test_vld3_lane_s32(int32_t const * a, int32x2x3_t b) {
|
|||
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i32 0, i32 2
|
||||
// CHECK: [[TMP9:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
|
||||
// CHECK: [[TMP10:%.*]] = bitcast <4 x half> [[TMP9]] to <8 x i8>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
|
||||
// CHECK: [[VLD3_LANE_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
|
||||
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
|
||||
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
|
||||
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half>
|
||||
// CHECK: [[VLD3_LANE_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>
|
||||
float16x4x3_t test_vld3_lane_f16(float16_t const * a, float16x4x3_t b) {
|
||||
return vld3_lane_f16(a, b, 3);
|
||||
}
|
||||
|
@ -6157,7 +6151,7 @@ int32x4x4_t test_vld4q_s32(int32_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD4Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
|
||||
// CHECK: [[VLD4Q_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half>
|
||||
float16x8x4_t test_vld4q_f16(float16_t const * a) {
|
||||
return vld4q_f16(a);
|
||||
}
|
||||
|
@ -6262,7 +6256,7 @@ int64x1x4_t test_vld4_s64(int64_t const * a) {
|
|||
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
|
||||
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
|
||||
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
|
||||
// CHECK: [[VLD4_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
|
||||
// CHECK: [[VLD4_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half>
|
||||
float16x4x4_t test_vld4_f16(float16_t const * a) {
|
||||
return vld4_f16(a);
|
||||
}

@@ -6367,7 +6361,7 @@ int64x1x4_t test_vld4_dup_s64(int64_t const * a) {
// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
// CHECK: [[VLD_DUP:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half>
float16x4x4_t test_vld4_dup_f16(float16_t const * a) {
return vld4_dup_f16(a);
}

@@ -6574,11 +6568,11 @@ int32x4x4_t test_vld4q_lane_s32(int32_t const * a, int32x4x4_t b) {
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
// CHECK: [[TMP11:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP12:%.*]] = bitcast <8 x half> [[TMP11]] to <16 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16>
// CHECK: [[TMP16:%.*]] = bitcast <16 x i8> [[TMP12]] to <8 x i16>
// CHECK: [[VLD4Q_LANE_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half>
// CHECK: [[TMP16:%.*]] = bitcast <16 x i8> [[TMP12]] to <8 x half>
// CHECK: [[VLD4Q_LANE_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half>
float16x8x4_t test_vld4q_lane_f16(float16_t const * a, float16x8x4_t b) {
return vld4q_lane_f16(a, b, 7);
}

@@ -6889,11 +6883,11 @@ int32x2x4_t test_vld4_lane_s32(int32_t const * a, int32x2x4_t b) {
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i32 0, i32 3
// CHECK: [[TMP11:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
// CHECK: [[TMP12:%.*]] = bitcast <4 x half> [[TMP11]] to <8 x i8>
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
// CHECK: [[TMP16:%.*]] = bitcast <8 x i8> [[TMP12]] to <4 x i16>
// CHECK: [[VLD4_LANE_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half>
// CHECK: [[TMP16:%.*]] = bitcast <8 x i8> [[TMP12]] to <4 x half>
// CHECK: [[VLD4_LANE_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half>
float16x4x4_t test_vld4_lane_f16(float16_t const * a, float16x4x4_t b) {
return vld4_lane_f16(a, b, 3);
}

@@ -15784,8 +15778,8 @@ void test_vst1q_s64(int64_t * a, int64x2_t b) {
// CHECK-LABEL: @test_vst1q_f16(
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* [[TMP0]], <8 x i16> [[TMP2]], i32 2)
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: call void @llvm.arm.neon.vst1.p0i8.v8f16(i8* [[TMP0]], <8 x half> [[TMP2]], i32 2)
// CHECK: ret void
void test_vst1q_f16(float16_t * a, float16x8_t b) {
vst1q_f16(a, b);

@@ -15895,8 +15889,8 @@ void test_vst1_s64(int64_t * a, int64x1_t b) {
// CHECK-LABEL: @test_vst1_f16(
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK: call void @llvm.arm.neon.vst1.p0i8.v4i16(i8* [[TMP0]], <4 x i16> [[TMP2]], i32 2)
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: call void @llvm.arm.neon.vst1.p0i8.v4f16(i8* [[TMP0]], <4 x half> [[TMP2]], i32 2)
// CHECK: ret void
void test_vst1_f16(float16_t * a, float16x4_t b) {
vst1_f16(a, b);

@@ -16018,10 +16012,10 @@ void test_vst1q_lane_s64(int64_t * a, int64x2_t b) {
// CHECK-LABEL: @test_vst1q_lane_f16(
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK: [[TMP3:%.*]] = extractelement <8 x i16> [[TMP2]], i32 7
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: store i16 [[TMP3]], i16* [[TMP4]], align 2
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP3:%.*]] = extractelement <8 x half> [[TMP2]], i32 7
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to half*
// CHECK: store half [[TMP3]], half* [[TMP4]], align 2
// CHECK: ret void
void test_vst1q_lane_f16(float16_t * a, float16x8_t b) {
vst1q_lane_f16(a, b, 7);

@@ -16150,10 +16144,10 @@ void test_vst1_lane_s64(int64_t * a, int64x1_t b) {
// CHECK-LABEL: @test_vst1_lane_f16(
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK: [[TMP3:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: store i16 [[TMP3]], i16* [[TMP4]], align 2
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP3:%.*]] = extractelement <4 x half> [[TMP2]], i32 3
// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to half*
// CHECK: store half [[TMP3]], half* [[TMP4]], align 2
// CHECK: ret void
void test_vst1_lane_f16(float16_t * a, float16x4_t b) {
vst1_lane_f16(a, b, 3);
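
As the updated checks above show, the lane store now goes through an `extractelement` of `<4 x half>` and a `store half` rather than an i16 round trip. A minimal usage sketch of the intrinsic itself (helper name is illustrative), writing a single 16-bit float element to memory:

```c
#include <arm_neon.h>

/* Illustrative only: store just the last lane of a half-precision
   vector; this is a single 2-byte store. */
void store_last_half(float16_t *out, float16x4_t v) {
  vst1_lane_f16(out, v, 3); /* lane index must be a constant in [0, 3] */
}
```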

@@ -16355,9 +16349,9 @@ void test_vst2q_s32(int32_t * a, int32x4x2_t b) {
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
// CHECK: call void @llvm.arm.neon.vst2.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP8]], <8 x i16> [[TMP9]], i32 2)
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
// CHECK: call void @llvm.arm.neon.vst2.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP8]], <8 x half> [[TMP9]], i32 2)
// CHECK: ret void
void test_vst2q_f16(float16_t * a, float16x8x2_t b) {
vst2q_f16(a, b);

@@ -16652,9 +16646,9 @@ void test_vst2_s64(int64_t * a, int64x1x2_t b) {
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i32 0, i32 1
// CHECK: [[TMP6:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
// CHECK: [[TMP7:%.*]] = bitcast <4 x half> [[TMP6]] to <8 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
// CHECK: call void @llvm.arm.neon.vst2.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], i32 2)
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
// CHECK: call void @llvm.arm.neon.vst2.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP8]], <4 x half> [[TMP9]], i32 2)
// CHECK: ret void
void test_vst2_f16(float16_t * a, float16x4x2_t b) {
vst2_f16(a, b);

@@ -16855,9 +16849,9 @@ void test_vst2q_lane_s32(int32_t * a, int32x4x2_t b) {
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
// CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
// CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP8]], <8 x i16> [[TMP9]], i32 7, i32 2)
// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP8]], <8 x half> [[TMP9]], i32 7, i32 2)
// CHECK: ret void
void test_vst2q_lane_f16(float16_t * a, float16x8x2_t b) {
vst2q_lane_f16(a, b, 7);

@@ -17079,9 +17073,9 @@ void test_vst2_lane_s32(int32_t * a, int32x2x2_t b) {
// CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i32 0, i32 1
// CHECK: [[TMP6:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
// CHECK: [[TMP7:%.*]] = bitcast <4 x half> [[TMP6]] to <8 x i8>
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], i32 3, i32 2)
// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP8]], <4 x half> [[TMP9]], i32 3, i32 2)
// CHECK: ret void
void test_vst2_lane_f16(float16_t * a, float16x4x2_t b) {
vst2_lane_f16(a, b, 3);

@@ -17354,10 +17348,10 @@ void test_vst3q_s32(int32_t * a, int32x4x3_t b) {
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
// CHECK: call void @llvm.arm.neon.vst3.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], <8 x i16> [[TMP12]], i32 2)
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
// CHECK: call void @llvm.arm.neon.vst3.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], <8 x half> [[TMP12]], i32 2)
// CHECK: ret void
void test_vst3q_f16(float16_t * a, float16x8x3_t b) {
vst3q_f16(a, b);

@@ -17705,10 +17699,10 @@ void test_vst3_s64(int64_t * a, int64x1x3_t b) {
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i32 0, i32 2
// CHECK: [[TMP8:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
// CHECK: [[TMP9:%.*]] = bitcast <4 x half> [[TMP8]] to <8 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
// CHECK: call void @llvm.arm.neon.vst3.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], <4 x i16> [[TMP12]], i32 2)
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
// CHECK: call void @llvm.arm.neon.vst3.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], <4 x half> [[TMP12]], i32 2)
// CHECK: ret void
void test_vst3_f16(float16_t * a, float16x4x3_t b) {
vst3_f16(a, b);

@@ -17946,10 +17940,10 @@ void test_vst3q_lane_s32(int32_t * a, int32x4x3_t b) {
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
// CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
// CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], <8 x i16> [[TMP12]], i32 7, i32 2)
// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], <8 x half> [[TMP12]], i32 7, i32 2)
// CHECK: ret void
void test_vst3q_lane_f16(float16_t * a, float16x8x3_t b) {
vst3q_lane_f16(a, b, 7);

@@ -18211,10 +18205,10 @@ void test_vst3_lane_s32(int32_t * a, int32x2x3_t b) {
// CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i32 0, i32 2
// CHECK: [[TMP8:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
// CHECK: [[TMP9:%.*]] = bitcast <4 x half> [[TMP8]] to <8 x i8>
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], <4 x i16> [[TMP12]], i32 3, i32 2)
// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], <4 x half> [[TMP12]], i32 3, i32 2)
// CHECK: ret void
void test_vst3_lane_f16(float16_t * a, float16x4x3_t b) {
vst3_lane_f16(a, b, 3);

@@ -18530,11 +18524,11 @@ void test_vst4q_s32(int32_t * a, int32x4x4_t b) {
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
// CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
// CHECK: call void @llvm.arm.neon.vst4.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], <8 x i16> [[TMP15]], i32 2)
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
// CHECK: call void @llvm.arm.neon.vst4.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], <8 x half> [[TMP15]], i32 2)
// CHECK: ret void
void test_vst4q_f16(float16_t * a, float16x8x4_t b) {
vst4q_f16(a, b);

@@ -18935,11 +18929,11 @@ void test_vst4_s64(int64_t * a, int64x1x4_t b) {
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i32 0, i32 3
// CHECK: [[TMP10:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
// CHECK: [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
// CHECK: call void @llvm.arm.neon.vst4.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], <4 x i16> [[TMP15]], i32 2)
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
// CHECK: call void @llvm.arm.neon.vst4.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], <4 x half> [[TMP15]], i32 2)
// CHECK: ret void
void test_vst4_f16(float16_t * a, float16x4x4_t b) {
vst4_f16(a, b);

@@ -19214,11 +19208,11 @@ void test_vst4q_lane_s32(int32_t * a, int32x4x4_t b) {
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
// CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
// CHECK: [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], <8 x i16> [[TMP15]], i32 7, i32 2)
// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], <8 x half> [[TMP15]], i32 7, i32 2)
// CHECK: ret void
void test_vst4q_lane_f16(float16_t * a, float16x8x4_t b) {
vst4q_lane_f16(a, b, 7);

@@ -19520,11 +19514,11 @@ void test_vst4_lane_s32(int32_t * a, int32x2x4_t b) {
// CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i32 0, i32 3
// CHECK: [[TMP10:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
// CHECK: [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], <4 x i16> [[TMP15]], i32 3, i32 2)
// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], <4 x half> [[TMP15]], i32 3, i32 2)
// CHECK: ret void
void test_vst4_lane_f16(float16_t * a, float16x4x4_t b) {
vst4_lane_f16(a, b, 3);

@@ -860,6 +860,10 @@ void Type::applyModifier(char Mod) {
Float = true;
ElementBitwidth = 64;
break;
case 'H':
Float = true;
ElementBitwidth = 16;
break;
case 'g':
if (AppliedQuad)
Bitwidth /= 2;
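
The new 'H' case marks a prototype slot as a vector of 16-bit floats (Float = true, ElementBitwidth = 16). Roughly, a result slot written with 'H' over 16-bit integer argument types resolves to float16x4_t, or float16x8_t under the 'Q' size modifier. The declarations below are only illustrative of the shape such prototypes take; the emitter's literal output comes from the .td definitions:

```c
/* Illustrative prototypes only: a result slot marked 'H' becomes a
   half-precision float vector with the same lane count as the arguments. */
float16x4_t vcvt_f16_s16(int16x4_t a);  /* 64-bit (D-register) form    */
float16x8_t vcvtq_f16_s16(int16x8_t a); /* 128-bit ('Q' modifier) form */
```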

@@ -1006,7 +1010,7 @@ std::string Intrinsic::getInstTypeCode(Type T, ClassKind CK) const {
}

static bool isFloatingPointProtoModifier(char Mod) {
return Mod == 'F' || Mod == 'f';
return Mod == 'F' || Mod == 'f' || Mod == 'H';
}

std::string Intrinsic::getBuiltinTypeStr() {