[NEON] Support vldNq intrinsics in AArch32 (Clang part)

This patch reworks the support for the dup NEON intrinsics: the
hand-rolled lane-load-and-splat lowering is replaced with calls to
the vldNdup intrinsics handled on the LLVM side by D48439, which
also makes the Q-register (vldNq_dup) forms available in AArch32
mode.

Differential Revision: https://reviews.llvm.org/D48440

llvm-svn: 335734
Author: Ivan A. Kosarev, 2018-06-27 13:58:43 +00:00
parent 7231598fce, commit a9f484ac4a
6 changed files with 1112 additions and 1611 deletions
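
As a quick illustration of what the patch enables (not part of the commit): the Q-register dup loads now compile in AArch32 mode. A minimal sketch, assuming a NEON-enabled target such as armv7a with -mfpu=neon; the function names are illustrative:

#include <arm_neon.h>

/* 128-bit (Q-register) dup loads, previously unavailable on AArch32. */
uint16x8x2_t dup2(uint16_t const *p) { return vld2q_dup_u16(p); }
int32x4x3_t dup3(int32_t const *p) { return vld3q_dup_s32(p); }
float32x4x4_t dup4(float32_t const *p) { return vld4q_dup_f32(p); }

Each intrinsic loads two/three/four consecutive elements and replicates element i across all lanes of result vector i.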


@@ -364,9 +364,12 @@ def VST1_LANE : WInst<"vst1_lane", "vpdi",
def VLD2 : WInst<"vld2", "2c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
def VLD3 : WInst<"vld3", "3c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
def VLD4 : WInst<"vld4", "4c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VLD2_DUP : WInst<"vld2_dup", "2c", "UcUsUiUlcsilhfPcPs">;
-def VLD3_DUP : WInst<"vld3_dup", "3c", "UcUsUiUlcsilhfPcPs">;
-def VLD4_DUP : WInst<"vld4_dup", "4c", "UcUsUiUlcsilhfPcPs">;
+def VLD2_DUP : WInst<"vld2_dup", "2c",
+                     "UcUsUiUlcsilhfPcPsQcQfQhQiQlQsQPcQPsQUcQUiQUlQUs">;
+def VLD3_DUP : WInst<"vld3_dup", "3c",
+                     "UcUsUiUlcsilhfPcPsQcQfQhQiQlQsQPcQPsQUcQUiQUlQUs">;
+def VLD4_DUP : WInst<"vld4_dup", "4c",
+                     "UcUsUiUlcsilhfPcPsQcQfQhQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD2_LANE : WInst<"vld2_lane", "2c2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
def VLD3_LANE : WInst<"vld3_lane", "3c3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
def VLD4_LANE : WInst<"vld4_lane", "4c4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
@@ -601,12 +604,9 @@ def ST3_LANE : WInst<"vst3_lane", "vp3i", "lUlQcQUcQPcQlQUldQdPlQPl">;
def ST4_LANE : WInst<"vst4_lane", "vp4i", "lUlQcQUcQPcQlQUldQdPlQPl">;
def LD1_DUP : WInst<"vld1_dup", "dc", "dQdPlQPl">;
-def LD2_DUP : WInst<"vld2_dup", "2c",
-                    "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsQPldPl">;
-def LD3_DUP : WInst<"vld3_dup", "3c",
-                    "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsQPldPl">;
-def LD4_DUP : WInst<"vld4_dup", "4c",
-                    "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsQPldPl">;
+def LD2_DUP : WInst<"vld2_dup", "2c", "dQdPlQPl">;
+def LD3_DUP : WInst<"vld3_dup", "3c", "dQdPlQPl">;
+def LD4_DUP : WInst<"vld4_dup", "4c", "dQdPlQPl">;
def VLDRQ : WInst<"vldrq", "sc", "Pk">;
def VSTRQ : WInst<"vstrq", "vps", "Pk">;
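
To read the prototype and type strings above: "2c" says the intrinsic returns a structure of two vectors and takes a const pointer to the element type; in the type lists, 'U' marks unsigned, 'P' polynomial, and a leading 'Q' a quad (128-bit) vector. The two hunks move the Q-prefixed specs from the AArch64-only LD*_DUP records into the shared VLD*_DUP records, so NeonEmitter now generates the vldNq_dup declarations for AArch32 as well, while the AArch64-only records keep just the float64 and poly64 variants (d, Qd, Pl, QPl). A sketch of the kind of prototypes the shared records now generate:

int8x16x2_t vld2q_dup_s8(int8_t const *ptr);
uint32x4x3_t vld3q_dup_u32(uint32_t const *ptr);
poly16x8x4_t vld4q_dup_p16(poly16_t const *ptr);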


@@ -4081,16 +4081,22 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
+NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2_v, arm_neon_vld2, 0),
+NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2q_v, arm_neon_vld2, 0),
+NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3_v, arm_neon_vld3, 0),
+NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3q_v, arm_neon_vld3, 0),
+NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4_v, arm_neon_vld4, 0),
+NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
@@ -4980,7 +4986,13 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
-case NEON::BI__builtin_neon_vld4q_v: {
+case NEON::BI__builtin_neon_vld4q_v:
+case NEON::BI__builtin_neon_vld2_dup_v:
+case NEON::BI__builtin_neon_vld2q_dup_v:
+case NEON::BI__builtin_neon_vld3_dup_v:
+case NEON::BI__builtin_neon_vld3q_dup_v:
+case NEON::BI__builtin_neon_vld4_dup_v:
+case NEON::BI__builtin_neon_vld4q_dup_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Value *Align = getAlignmentValue32(PtrOp1);
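
With the dup builtins folded into this shared case, codegen is a single call to the mapped LLVM intrinsic (per the NEONMAP1 entries above), taking the source pointer and an explicit alignment, exactly as for the plain vldN builtins. A sketch of the IR one would expect for a Q-form dup load, assuming the overload suffixes follow the other arm.neon.vldN intrinsics (value names and alignment are illustrative):

uint16x8x2_t t(uint16_t const *p) { return vld2q_dup_u16(p); }
// expected to lower to roughly:
//   %vld2 = call { <8 x i16>, <8 x i16> }
//       @llvm.arm.neon.vld2dup.v8i16.p0i8(i8* %0, i32 2)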
@@ -5866,8 +5878,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v:
case NEON::BI__builtin_neon_vld2_dup_v:
+case NEON::BI__builtin_neon_vld2q_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
+case NEON::BI__builtin_neon_vld3q_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v:
+case NEON::BI__builtin_neon_vld4q_dup_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
@@ -6044,68 +6059,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
-case NEON::BI__builtin_neon_vld2_dup_v:
-case NEON::BI__builtin_neon_vld3_dup_v:
-case NEON::BI__builtin_neon_vld4_dup_v: {
-// Handle 64-bit elements as a special-case. There is no "dup" needed.
-if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
-switch (BuiltinID) {
-case NEON::BI__builtin_neon_vld2_dup_v:
-Int = Intrinsic::arm_neon_vld2;
-break;
-case NEON::BI__builtin_neon_vld3_dup_v:
-Int = Intrinsic::arm_neon_vld3;
-break;
-case NEON::BI__builtin_neon_vld4_dup_v:
-Int = Intrinsic::arm_neon_vld4;
-break;
-default: llvm_unreachable("unknown vld_dup intrinsic?");
-}
-llvm::Type *Tys[] = {Ty, Int8PtrTy};
-Function *F = CGM.getIntrinsic(Int, Tys);
-llvm::Value *Align = getAlignmentValue32(PtrOp1);
-Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
-Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
-}
-switch (BuiltinID) {
-case NEON::BI__builtin_neon_vld2_dup_v:
-Int = Intrinsic::arm_neon_vld2lane;
-break;
-case NEON::BI__builtin_neon_vld3_dup_v:
-Int = Intrinsic::arm_neon_vld3lane;
-break;
-case NEON::BI__builtin_neon_vld4_dup_v:
-Int = Intrinsic::arm_neon_vld4lane;
-break;
-default: llvm_unreachable("unknown vld_dup intrinsic?");
-}
-llvm::Type *Tys[] = {Ty, Int8PtrTy};
-Function *F = CGM.getIntrinsic(Int, Tys);
-llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
-SmallVector<Value*, 6> Args;
-Args.push_back(Ops[1]);
-Args.append(STy->getNumElements(), UndefValue::get(Ty));
-llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
-Args.push_back(CI);
-Args.push_back(getAlignmentValue32(PtrOp1));
-Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
-// splat lane 0 to all elts in each vector of the result.
-for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
-Value *Val = Builder.CreateExtractValue(Ops[1], i);
-Value *Elt = Builder.CreateBitCast(Val, Ty);
-Elt = EmitNeonSplat(Elt, CI);
-Elt = Builder.CreateBitCast(Elt, Val->getType());
-Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
-}
-Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
-Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
-}
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
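
For context, the block deleted above was the old AArch32-only lowering: 64-bit element types took a plain vldN call (a <1 x i64> vector needs no dup), and every other type was emitted as a vldNlane load of lane 0 followed by EmitNeonSplat on each struct member — the extractvalue/shufflevector/insertvalue chains visible in the deleted test at the bottom of this commit. Both paths are superseded by the direct vldNdup calls now emitted from EmitCommonNeonBuiltinExpr.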

(File diff suppressed because it is too large.)

(File diff suppressed because it is too large.)


@@ -4732,111 +4732,6 @@ poly16x4x2_t test_vld2_p16(poly16_t const * a) {
return vld2_p16(a);
}
-// CHECK-LABEL: @test_vld2_dup_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>
-uint8x8x2_t test_vld2_dup_u8(uint8_t const * a) {
-return vld2_dup_u8(a);
-}
-// CHECK-LABEL: @test_vld2_dup_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-uint16x4x2_t test_vld2_dup_u16(uint16_t const * a) {
-return vld2_dup_u16(a);
-}
-// CHECK-LABEL: @test_vld2_dup_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x2x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>
-uint32x2x2_t test_vld2_dup_u32(uint32_t const * a) {
-return vld2_dup_u32(a);
-}
-// CHECK-LABEL: @test_vld2_dup_u64(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x1x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>
-uint64x1x2_t test_vld2_dup_u64(uint64_t const * a) {
-return vld2_dup_u64(a);
-}
-// CHECK-LABEL: @test_vld2_dup_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x8x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>
-int8x8x2_t test_vld2_dup_s8(int8_t const * a) {
-return vld2_dup_s8(a);
-}
-// CHECK-LABEL: @test_vld2_dup_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-int16x4x2_t test_vld2_dup_s16(int16_t const * a) {
-return vld2_dup_s16(a);
-}
-// CHECK-LABEL: @test_vld2_dup_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x2x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x2x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>
-int32x2x2_t test_vld2_dup_s32(int32_t const * a) {
-return vld2_dup_s32(a);
-}
-// CHECK-LABEL: @test_vld2_dup_s64(
-// CHECK: [[__RET:%.*]] = alloca %struct.int64x1x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x1x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>
-int64x1x2_t test_vld2_dup_s64(int64_t const * a) {
-return vld2_dup_s64(a);
-}
-// CHECK-LABEL: @test_vld2_dup_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-float16x4x2_t test_vld2_dup_f16(float16_t const * a) {
-return vld2_dup_f16(a);
-}
-// CHECK-LABEL: @test_vld2_dup_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x2x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x2x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x float>, <2 x float>
-float32x2x2_t test_vld2_dup_f32(float32_t const * a) {
-return vld2_dup_f32(a);
-}
-// CHECK-LABEL: @test_vld2_dup_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>
-poly8x8x2_t test_vld2_dup_p8(poly8_t const * a) {
-return vld2_dup_p8(a);
-}
-// CHECK-LABEL: @test_vld2_dup_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-poly16x4x2_t test_vld2_dup_p16(poly16_t const * a) {
-return vld2_dup_p16(a);
-}
// CHECK-LABEL: @test_vld2q_lane_u16(
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16
@@ -5473,111 +5368,6 @@ poly16x4x3_t test_vld3_p16(poly16_t const * a) {
return vld3_p16(a);
}
-// CHECK-LABEL: @test_vld3_dup_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x3_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>
-uint8x8x3_t test_vld3_dup_u8(uint8_t const * a) {
-return vld3_dup_u8(a);
-}
-// CHECK-LABEL: @test_vld3_dup_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-uint16x4x3_t test_vld3_dup_u16(uint16_t const * a) {
-return vld3_dup_u16(a);
-}
-// CHECK-LABEL: @test_vld3_dup_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x2x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>
-uint32x2x3_t test_vld3_dup_u32(uint32_t const * a) {
-return vld3_dup_u32(a);
-}
-// CHECK-LABEL: @test_vld3_dup_u64(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x1x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>
-uint64x1x3_t test_vld3_dup_u64(uint64_t const * a) {
-return vld3_dup_u64(a);
-}
-// CHECK-LABEL: @test_vld3_dup_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x8x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x3_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>
-int8x8x3_t test_vld3_dup_s8(int8_t const * a) {
-return vld3_dup_s8(a);
-}
-// CHECK-LABEL: @test_vld3_dup_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-int16x4x3_t test_vld3_dup_s16(int16_t const * a) {
-return vld3_dup_s16(a);
-}
-// CHECK-LABEL: @test_vld3_dup_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x2x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x2x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>
-int32x2x3_t test_vld3_dup_s32(int32_t const * a) {
-return vld3_dup_s32(a);
-}
-// CHECK-LABEL: @test_vld3_dup_s64(
-// CHECK: [[__RET:%.*]] = alloca %struct.int64x1x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x1x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>
-int64x1x3_t test_vld3_dup_s64(int64_t const * a) {
-return vld3_dup_s64(a);
-}
-// CHECK-LABEL: @test_vld3_dup_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-float16x4x3_t test_vld3_dup_f16(float16_t const * a) {
-return vld3_dup_f16(a);
-}
-// CHECK-LABEL: @test_vld3_dup_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x2x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x2x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x float>, <2 x float>, <2 x float>
-float32x2x3_t test_vld3_dup_f32(float32_t const * a) {
-return vld3_dup_f32(a);
-}
-// CHECK-LABEL: @test_vld3_dup_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x3_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>
-poly8x8x3_t test_vld3_dup_p8(poly8_t const * a) {
-return vld3_dup_p8(a);
-}
-// CHECK-LABEL: @test_vld3_dup_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-poly16x4x3_t test_vld3_dup_p16(poly16_t const * a) {
-return vld3_dup_p16(a);
-}
// CHECK-LABEL: @test_vld3q_lane_u16(
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16
@@ -6293,111 +6083,6 @@ poly16x4x4_t test_vld4_p16(poly16_t const * a) {
return vld4_p16(a);
}
-// CHECK-LABEL: @test_vld4_dup_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x4_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>
-uint8x8x4_t test_vld4_dup_u8(uint8_t const * a) {
-return vld4_dup_u8(a);
-}
-// CHECK-LABEL: @test_vld4_dup_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-uint16x4x4_t test_vld4_dup_u16(uint16_t const * a) {
-return vld4_dup_u16(a);
-}
-// CHECK-LABEL: @test_vld4_dup_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x2x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>
-uint32x2x4_t test_vld4_dup_u32(uint32_t const * a) {
-return vld4_dup_u32(a);
-}
-// CHECK-LABEL: @test_vld4_dup_u64(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x1x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>
-uint64x1x4_t test_vld4_dup_u64(uint64_t const * a) {
-return vld4_dup_u64(a);
-}
-// CHECK-LABEL: @test_vld4_dup_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x8x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x4_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>
-int8x8x4_t test_vld4_dup_s8(int8_t const * a) {
-return vld4_dup_s8(a);
-}
-// CHECK-LABEL: @test_vld4_dup_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-int16x4x4_t test_vld4_dup_s16(int16_t const * a) {
-return vld4_dup_s16(a);
-}
-// CHECK-LABEL: @test_vld4_dup_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x2x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x2x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>
-int32x2x4_t test_vld4_dup_s32(int32_t const * a) {
-return vld4_dup_s32(a);
-}
-// CHECK-LABEL: @test_vld4_dup_s64(
-// CHECK: [[__RET:%.*]] = alloca %struct.int64x1x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x1x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>
-int64x1x4_t test_vld4_dup_s64(int64_t const * a) {
-return vld4_dup_s64(a);
-}
-// CHECK-LABEL: @test_vld4_dup_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-float16x4x4_t test_vld4_dup_f16(float16_t const * a) {
-return vld4_dup_f16(a);
-}
-// CHECK-LABEL: @test_vld4_dup_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x2x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x2x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x float>, <2 x float>, <2 x float>, <2 x float>
-float32x2x4_t test_vld4_dup_f32(float32_t const * a) {
-return vld4_dup_f32(a);
-}
-// CHECK-LABEL: @test_vld4_dup_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x4_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>
-poly8x8x4_t test_vld4_dup_p8(poly8_t const * a) {
-return vld4_dup_p8(a);
-}
-// CHECK-LABEL: @test_vld4_dup_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-poly16x4x4_t test_vld4_dup_p16(poly16_t const * a) {
-return vld4_dup_p16(a);
-}
// CHECK-LABEL: @test_vld4q_lane_u16(
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16


@ -1,50 +0,0 @@
-// REQUIRES: arm-registered-target
-// RUN: %clang_cc1 -triple armv7a-linux-gnueabi \
-// RUN: -target-cpu cortex-a8 \
-// RUN: -emit-llvm -o - %s | FileCheck %s
-#include <arm_neon.h>
-int main(){
-int32_t v0[3];
-int32x2x3_t v1;
-int32_t v2[4];
-int32x2x4_t v3;
-int64x1x3_t v4;
-int64x1x4_t v5;
-int64_t v6[3];
-int64_t v7[4];
-v1 = vld3_dup_s32(v0);
-// CHECK: [[T168:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3lane.v2i32.p0i8(i8* {{.*}}, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 {{[0-9]+}}, i32 {{[0-9]+}})
-// CHECK-NEXT: [[T169:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[T168]], 0
-// CHECK-NEXT: [[T170:%.*]] = shufflevector <2 x i32> [[T169]], <2 x i32> [[T169]], <2 x i32> zeroinitializer
-// CHECK-NEXT: [[T171:%.*]] = insertvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[T168]], <2 x i32> [[T170]], 0
-// CHECK-NEXT: [[T172:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[T171]], 1
-// CHECK-NEXT: [[T173:%.*]] = shufflevector <2 x i32> [[T172]], <2 x i32> [[T172]], <2 x i32> zeroinitializer
-// CHECK-NEXT: [[T174:%.*]] = insertvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[T171]], <2 x i32> [[T173]], 1
-// CHECK-NEXT: [[T175:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[T174]], 2
-// CHECK-NEXT: [[T176:%.*]] = shufflevector <2 x i32> [[T175]], <2 x i32> [[T175]], <2 x i32> zeroinitializer
-// CHECK-NEXT: [[T177:%.*]] = insertvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[T174]], <2 x i32> [[T176]], 2
-v3 = vld4_dup_s32(v2);
-// CHECK: [[T178:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32.p0i8(i8* {{.*}}, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 {{[0-9]+}}, i32 {{[0-9]+}})
-// CHECK-NEXT: [[T179:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T178]], 0
-// CHECK-NEXT: [[T180:%.*]] = shufflevector <2 x i32> [[T179]], <2 x i32> [[T179]], <2 x i32> zeroinitializer
-// CHECK-NEXT: [[T181:%.*]] = insertvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T178]], <2 x i32> [[T180]], 0
-// CHECK-NEXT: [[T182:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T181]], 1
-// CHECK-NEXT: [[T183:%.*]] = shufflevector <2 x i32> [[T182]], <2 x i32> [[T182]], <2 x i32> zeroinitializer
-// CHECK-NEXT: [[T184:%.*]] = insertvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T181]], <2 x i32> [[T183]], 1
-// CHECK-NEXT: [[T185:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T184]], 2
-// CHECK-NEXT: [[T186:%.*]] = shufflevector <2 x i32> [[T185]], <2 x i32> [[T185]], <2 x i32> zeroinitializer
-// CHECK-NEXT: [[T187:%.*]] = insertvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T184]], <2 x i32> [[T186]], 2
-// CHECK-NEXT: [[T188:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T187]], 3
-// CHECK-NEXT: [[T189:%.*]] = shufflevector <2 x i32> [[T188]], <2 x i32> [[T188]], <2 x i32> zeroinitializer
-// CHECK-NEXT: [[T190:%.*]] = insertvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[T187]], <2 x i32> [[T189]], 3
-v4 = vld3_dup_s64(v6);
-// CHECK: {{%.*}} = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64.p0i8(i8* {{.*}}, i32 {{[0-9]+}})
-v5 = vld4_dup_s64(v7);
-// CHECK: {{%.*}} = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64.p0i8(i8* {{.*}}, i32 {{[0-9]+}})
-return 0;
-}
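
This standalone test pinned down the old splat-based expansion and is deleted with it; dup coverage presumably moves to the regenerated checks in the suppressed test diffs above. A hypothetical replacement check under the new lowering, assuming the mangling matches the old vldNlane pattern:

int32x2x3_t t(int32_t const *p) { return vld3_dup_s32(p); }
// CHECK: call { <2 x i32>, <2 x i32>, <2 x i32> }
// CHECK-SAME: @llvm.arm.neon.vld3dup.v2i32.p0i8(i8* {{.*}}, i32 {{[0-9]+}})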