; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
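; This file exercises NEON multiply codegen: plain vmul (integer, float,
; polynomial), widening vmull (signed, unsigned, polynomial), lane/scalar
; forms, and distribution of multiply over add into vmla/vmlal.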

define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmuli8:
;CHECK: vmul.i8
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = mul <8 x i8> %tmp1, %tmp2
ret <8 x i8> %tmp3
}

define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vmuli16:
;CHECK: vmul.i16
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = mul <4 x i16> %tmp1, %tmp2
ret <4 x i16> %tmp3
}

define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vmuli32:
;CHECK: vmul.i32
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = mul <2 x i32> %tmp1, %tmp2
ret <2 x i32> %tmp3
}

define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vmulf32:
;CHECK: vmul.f32
%tmp1 = load <2 x float>, <2 x float>* %A
%tmp2 = load <2 x float>, <2 x float>* %B
%tmp3 = fmul <2 x float> %tmp1, %tmp2
ret <2 x float> %tmp3
}

define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmulp8:
;CHECK: vmul.p8
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vmulQi8:
;CHECK: vmul.i8
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = mul <16 x i8> %tmp1, %tmp2
ret <16 x i8> %tmp3
}

define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vmulQi16:
;CHECK: vmul.i16
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = mul <8 x i16> %tmp1, %tmp2
ret <8 x i16> %tmp3
}

define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vmulQi32:
;CHECK: vmul.i32
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = mul <4 x i32> %tmp1, %tmp2
ret <4 x i32> %tmp3
}

define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: vmulQf32:
;CHECK: vmul.f32
%tmp1 = load <4 x float>, <4 x float>* %A
%tmp2 = load <4 x float>, <4 x float>* %B
%tmp3 = fmul <4 x float> %tmp1, %tmp2
ret <4 x float> %tmp3
}

define <16 x i8> @vmulQp8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vmulQp8:
;CHECK: vmul.p8
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

declare <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

define arm_aapcs_vfpcc <2 x float> @test_vmul_lanef32(<2 x float> %arg0_float32x2_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmul_lanef32:
; CHECK: vmul.f32 d0, d0, d1[0]
%0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <2 x i32> zeroinitializer ; <<2 x float>> [#uses=1]
%1 = fmul <2 x float> %0, %arg0_float32x2_t ; <<2 x float>> [#uses=1]
ret <2 x float> %1
}

define arm_aapcs_vfpcc <4 x i16> @test_vmul_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmul_lanes16:
; CHECK: vmul.i16 d0, d0, d1[1]
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = mul <4 x i16> %0, %arg0_int16x4_t ; <<4 x i16>> [#uses=1]
ret <4 x i16> %1
}

define arm_aapcs_vfpcc <2 x i32> @test_vmul_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmul_lanes32:
; CHECK: vmul.i32 d0, d0, d1[1]
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = mul <2 x i32> %0, %arg0_int32x2_t ; <<2 x i32>> [#uses=1]
ret <2 x i32> %1
}

define arm_aapcs_vfpcc <4 x float> @test_vmulQ_lanef32(<4 x float> %arg0_float32x4_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmulQ_lanef32:
; CHECK: vmul.f32 q0, q0, d2[1]
%0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
%1 = fmul <4 x float> %0, %arg0_float32x4_t ; <<4 x float>> [#uses=1]
ret <4 x float> %1
}

define arm_aapcs_vfpcc <8 x i16> @test_vmulQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmulQ_lanes16:
; CHECK: vmul.i16 q0, q0, d2[1]
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%1 = mul <8 x i16> %0, %arg0_int16x8_t ; <<8 x i16>> [#uses=1]
ret <8 x i16> %1
}

define arm_aapcs_vfpcc <4 x i32> @test_vmulQ_lanes32(<4 x i32> %arg0_int32x4_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmulQ_lanes32:
; CHECK: vmul.i32 q0, q0, d2[1]
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i32>> [#uses=1]
%1 = mul <4 x i32> %0, %arg0_int32x4_t ; <<4 x i32>> [#uses=1]
ret <4 x i32> %1
}

define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmulls8:
;CHECK: vmull.s8
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = mul <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}

define <8 x i16> @vmulls8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmulls8_int:
;CHECK: vmull.s8
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vmulls16:
;CHECK: vmull.s16
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = mul <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}

define <4 x i32> @vmulls16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vmulls16_int:
;CHECK: vmull.s16
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vmulls32:
;CHECK: vmull.s32
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = mul <2 x i64> %tmp3, %tmp4
ret <2 x i64> %tmp5
}

define <2 x i64> @vmulls32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vmulls32_int:
;CHECK: vmull.s32
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i64> %tmp3
}

define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmullu8:
;CHECK: vmull.u8
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = mul <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}

define <8 x i16> @vmullu8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmullu8_int:
;CHECK: vmull.u8
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vmullu16:
;CHECK: vmull.u16
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = mul <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}

define <4 x i32> @vmullu16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vmullu16_int:
;CHECK: vmull.u16
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vmullu32:
;CHECK: vmull.u32
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = mul <2 x i64> %tmp3, %tmp4
ret <2 x i64> %tmp5
}

define <2 x i64> @vmullu32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vmullu32_int:
;CHECK: vmull.u32
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i64> %tmp3
}

define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmullp8:
;CHECK: vmull.p8
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i16> %tmp3
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_lanes16:
; CHECK: vmull.s16 q0, d0, d1[1]
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = sext <4 x i16> %arg0_int16x4_t to <4 x i32>
%2 = sext <4 x i16> %0 to <4 x i32>
%3 = mul <4 x i32> %1, %2
ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16_int(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_lanes16_int:
; CHECK: vmull.s16 q0, d0, d1[1]
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %1
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_lanes32:
; CHECK: vmull.s32 q0, d0, d1[1]
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = sext <2 x i32> %arg0_int32x2_t to <2 x i64>
%2 = sext <2 x i32> %0 to <2 x i64>
%3 = mul <2 x i64> %1, %2
ret <2 x i64> %3
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32_int(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_lanes32_int:
; CHECK: vmull.s32 q0, d0, d1[1]
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %1
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_laneu16:
; CHECK: vmull.u16 q0, d0, d1[1]
%0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = zext <4 x i16> %arg0_uint16x4_t to <4 x i32>
%2 = zext <4 x i16> %0 to <4 x i32>
%3 = mul <4 x i32> %1, %2
ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16_int(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_laneu16_int:
; CHECK: vmull.u16 q0, d0, d1[1]
%0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %arg0_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %1
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_laneu32:
; CHECK: vmull.u32 q0, d0, d1[1]
%0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = zext <2 x i32> %arg0_uint32x2_t to <2 x i64>
%2 = zext <2 x i32> %0 to <2 x i64>
%3 = mul <2 x i64> %1, %2
ret <2 x i64> %3
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32_int(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
entry:
; CHECK-LABEL: test_vmull_laneu32_int:
; CHECK: vmull.u32 q0, d0, d1[1]
%0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %arg0_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %1
}

declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone

; Radar 8687140
; VMULL needs to recognize BUILD_VECTORs with sign/zero-extended elements.
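; A mul of a sign/zero-extended vector by a constant splat can use vmull only
; when the splatted constant fits in the narrow source element type; the tests
; below cover both foldable constants (e.g. -12, 1234) and constants that are
; too wide for the source type (e.g. 999).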

define <8 x i16> @vmull_extvec_s8(<8 x i8> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_s8:
; CHECK: vmull.s8
%tmp3 = sext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12>
ret <8 x i16> %tmp4
}

define <8 x i16> @vmull_extvec_u8(<8 x i8> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_u8:
; CHECK: vmull.u8
%tmp3 = zext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12>
ret <8 x i16> %tmp4
}

define <8 x i16> @vmull_noextvec_s8(<8 x i8> %arg) nounwind {
; Do not use VMULL if the BUILD_VECTOR element values are too big.
; CHECK-LABEL: vmull_noextvec_s8:
; CHECK: vmovl.s8
; CHECK: vmul.i16
%tmp3 = sext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999>
ret <8 x i16> %tmp4
}

define <8 x i16> @vmull_noextvec_u8(<8 x i8> %arg) nounwind {
; Do not use VMULL if the BUILD_VECTOR element values are too big.
; CHECK-LABEL: vmull_noextvec_u8:
; CHECK: vmovl.u8
; CHECK: vmul.i16
%tmp3 = zext <8 x i8> %arg to <8 x i16>
%tmp4 = mul <8 x i16> %tmp3, <i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999>
ret <8 x i16> %tmp4
}

define <4 x i32> @vmull_extvec_s16(<4 x i16> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_s16:
; CHECK: vmull.s16
%tmp3 = sext <4 x i16> %arg to <4 x i32>
%tmp4 = mul <4 x i32> %tmp3, <i32 -12, i32 -12, i32 -12, i32 -12>
ret <4 x i32> %tmp4
}

define <4 x i32> @vmull_extvec_u16(<4 x i16> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_u16:
; CHECK: vmull.u16
%tmp3 = zext <4 x i16> %arg to <4 x i32>
%tmp4 = mul <4 x i32> %tmp3, <i32 1234, i32 1234, i32 1234, i32 1234>
ret <4 x i32> %tmp4
}

define <2 x i64> @vmull_extvec_s32(<2 x i32> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_s32:
; CHECK: vmull.s32
%tmp3 = sext <2 x i32> %arg to <2 x i64>
%tmp4 = mul <2 x i64> %tmp3, <i64 -1234, i64 -1234>
ret <2 x i64> %tmp4
}

define <2 x i64> @vmull_extvec_u32(<2 x i32> %arg) nounwind {
; CHECK-LABEL: vmull_extvec_u32:
; CHECK: vmull.u32
%tmp3 = zext <2 x i32> %arg to <2 x i64>
%tmp4 = mul <2 x i64> %tmp3, <i64 1234, i64 1234>
ret <2 x i64> %tmp4
}

; Distribute (zext A + zext B) * C into (vmull A, C) + (vmlal B, C) during
; isel lowering to fold the zero-extends and take advantage of the no-stall
; back-to-back vmull + vmlal:
;   vmull q0, d4, d6
;   vmlal q0, d5, d6
; is faster than
;   vaddl q0, d4, d5
;   vmovl q1, d6
;   vmul  q0, q0, q1
; This allows vmull + vmlal for:
;   f = vmull_u8( vget_high_u8(s), c);
;   f = vmlal_u8(f, vget_low_u8(s), c);
; rdar://9197392
define void @distribute(i16* %dst, i8* %src, i32 %mul) nounwind {
entry:
; CHECK-LABEL: distribute:
; CHECK: vmull.u8 [[REG1:(q[0-9]+)]], d{{.*}}, [[REG2:(d[0-9]+)]]
; CHECK: vmlal.u8 [[REG1]], d{{.*}}, [[REG2]]
%0 = trunc i32 %mul to i8
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
%3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %src, i32 1)
%4 = bitcast <16 x i8> %3 to <2 x double>
%5 = extractelement <2 x double> %4, i32 1
%6 = bitcast double %5 to <8 x i8>
%7 = zext <8 x i8> %6 to <8 x i16>
%8 = zext <8 x i8> %2 to <8 x i16>
%9 = extractelement <2 x double> %4, i32 0
%10 = bitcast double %9 to <8 x i8>
%11 = zext <8 x i8> %10 to <8 x i16>
%12 = add <8 x i16> %7, %11
%13 = mul <8 x i16> %12, %8
%14 = bitcast i16* %dst to i8*
tail call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %14, <8 x i16> %13, i32 2)
ret void
}
declare <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8*, i32) nounwind readonly
declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
; Take advantage of the Cortex-A8 multiplier accumulator forward.
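; mul(add(a, b), c) is expected to lower to vmul followed by vmla, i.e. a*c
; accumulated with b*c, so the multiply result feeds the accumulator directly
; and no separate vadd is emitted.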
%struct.uint8x8_t = type { <8 x i8> }
define void @distribute2(%struct.uint8x8_t* nocapture %dst, i8* %src, i32 %mul) nounwind {
entry:
; CHECK-LABEL: distribute2:
; CHECK-NOT: vadd.i8
; CHECK: vmul.i8
; CHECK: vmla.i8
%0 = trunc i32 %mul to i8
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
%3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %src, i32 1)
%4 = bitcast <16 x i8> %3 to <2 x double>
%5 = extractelement <2 x double> %4, i32 1
%6 = bitcast double %5 to <8 x i8>
%7 = extractelement <2 x double> %4, i32 0
%8 = bitcast double %7 to <8 x i8>
%9 = add <8 x i8> %6, %8
%10 = mul <8 x i8> %9, %2
%11 = getelementptr inbounds %struct.uint8x8_t, %struct.uint8x8_t* %dst, i32 0, i32 0
store <8 x i8> %10, <8 x i8>* %11, align 8
ret void
}
define void @distribute2_commutative(%struct.uint8x8_t* nocapture %dst, i8* %src, i32 %mul) nounwind {
entry:
; CHECK-LABEL: distribute2_commutative:
; CHECK-NOT: vadd.i8
; CHECK: vmul.i8
; CHECK: vmla.i8
%0 = trunc i32 %mul to i8
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
%3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %src, i32 1)
%4 = bitcast <16 x i8> %3 to <2 x double>
%5 = extractelement <2 x double> %4, i32 1
%6 = bitcast double %5 to <8 x i8>
%7 = extractelement <2 x double> %4, i32 0
%8 = bitcast double %7 to <8 x i8>
%9 = add <8 x i8> %6, %8
%10 = mul <8 x i8> %2, %9
%11 = getelementptr inbounds %struct.uint8x8_t, %struct.uint8x8_t* %dst, i32 0, i32 0
store <8 x i8> %10, <8 x i8>* %11, align 8
ret void
}
define <8 x i8> @no_distribute(<8 x i8> %a, <8 x i8> %b) nounwind {
entry:
; CHECK-LABEL: no_distribute:
; CHECK: vadd.i8
; CHECK: vmul.i8
; CHECK-NOT: vmla.i8
%0 = add <8 x i8> %a, %b
%1 = mul <8 x i8> %0, %0
ret <8 x i8> %1
}

; If one operand has a zero-extend and the other a sign-extend, vmull
; cannot be used.
define i16 @vmullWithInconsistentExtensions(<8 x i8> %vec) {
; CHECK-LABEL: vmullWithInconsistentExtensions:
; CHECK-NOT: vmull.s8
%1 = sext <8 x i8> %vec to <8 x i16>
%2 = mul <8 x i16> %1, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
%3 = extractelement <8 x i16> %2, i32 0
ret i16 %3
}

; A constant build_vector created for a vmull with half-width elements must
; not introduce illegal types. <rdar://problem/11324364>
define void @vmull_buildvector() nounwind optsize ssp align 2 {
; CHECK-LABEL: vmull_buildvector:
entry:
br i1 undef, label %for.end179, label %for.body.lr.ph

for.body.lr.ph: ; preds = %entry
br label %for.body

for.cond.loopexit: ; preds = %for.body33, %for.body
br i1 undef, label %for.end179, label %for.body

for.body: ; preds = %for.cond.loopexit, %for.body.lr.ph
br i1 undef, label %for.cond.loopexit, label %for.body33.lr.ph

for.body33.lr.ph: ; preds = %for.body
%.sub = select i1 undef, i32 0, i32 undef
br label %for.body33

for.body33: ; preds = %for.body33, %for.body33.lr.ph
%add45 = add i32 undef, undef
%vld155 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* undef, i32 1)
%0 = load i32*, i32** undef, align 4
%shuffle.i250 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%1 = bitcast <1 x i64> %shuffle.i250 to <8 x i8>
%vmovl.i249 = zext <8 x i8> %1 to <8 x i16>
%shuffle.i246 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%shuffle.i240 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> <i32 1>
%2 = bitcast <1 x i64> %shuffle.i240 to <8 x i8>
%3 = bitcast <16 x i8> undef to <2 x i64>
%vmovl.i237 = zext <8 x i8> undef to <8 x i16>
%shuffle.i234 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%shuffle.i226 = shufflevector <2 x i64> undef, <2 x i64> undef, <1 x i32> zeroinitializer
%vmovl.i225 = zext <8 x i8> undef to <8 x i16>
%mul.i223 = mul <8 x i16> %vmovl.i249, %vmovl.i249
%vshl_n = shl <8 x i16> %mul.i223, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
%vqsub2.i216 = tail call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>, <8 x i16> %vshl_n) nounwind
%mul.i209 = mul <8 x i16> undef, <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>
%vshr_n130 = lshr <8 x i16> undef, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%vshr_n134 = lshr <8 x i16> %mul.i209, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%sub.i205 = sub <8 x i16> <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>, %vshr_n130
%sub.i203 = sub <8 x i16> <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>, %vshr_n134
%add.i200 = add <8 x i16> %sub.i205, <i16 96, i16 96, i16 96, i16 96, i16 96, i16 96, i16 96, i16 96>
%add.i198 = add <8 x i16> %add.i200, %sub.i203
%mul.i194 = mul <8 x i16> %add.i198, %vmovl.i237
%mul.i191 = mul <8 x i16> %vshr_n130, undef
%add.i192 = add <8 x i16> %mul.i191, %mul.i194
%mul.i187 = mul <8 x i16> %vshr_n134, undef
%add.i188 = add <8 x i16> %mul.i187, %add.i192
%mul.i185 = mul <8 x i16> undef, undef
%add.i186 = add <8 x i16> %mul.i185, undef
%vrshr_n160 = tail call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i188, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
%vrshr_n163 = tail call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i186, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
%mul.i184 = mul <8 x i16> undef, %vrshr_n160
%mul.i181 = mul <8 x i16> undef, %vmovl.i225
%add.i182 = add <8 x i16> %mul.i181, %mul.i184
%vrshr_n170 = tail call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i182, <8 x i16> <i16 -7, i16 -7, i16 -7, i16 -7, i16 -7, i16 -7, i16 -7, i16 -7>)
%vqmovn1.i180 = tail call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %vrshr_n170) nounwind
%4 = bitcast <8 x i8> %vqmovn1.i180 to <1 x i64>
%shuffle.i = shufflevector <1 x i64> %4, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
%5 = bitcast <2 x i64> %shuffle.i to <16 x i8>
store <16 x i8> %5, <16 x i8>* undef, align 16
%add177 = add nsw i32 undef, 16
br i1 undef, label %for.body33, label %for.cond.loopexit

for.end179: ; preds = %for.cond.loopexit, %entry
ret void
}

declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) nounwind readnone

; vmull lowering would create a zext(v4i8 load()) instead of a zextload(v4i8),
; creating an illegal type during legalization and causing an assert.
; PR15970
define void @no_illegal_types_vmull_sext(<4 x i32> %a) {
entry:
%wide.load283.i = load <4 x i8>, <4 x i8>* undef, align 1
%0 = sext <4 x i8> %wide.load283.i to <4 x i32>
%1 = sub nsw <4 x i32> %0, %a
%2 = mul nsw <4 x i32> %1, %1
%predphi290.v.i = select <4 x i1> undef, <4 x i32> undef, <4 x i32> %2
store <4 x i32> %predphi290.v.i, <4 x i32>* undef, align 4
ret void
}

define void @no_illegal_types_vmull_zext(<4 x i32> %a) {
entry:
%wide.load283.i = load <4 x i8>, <4 x i8>* undef, align 1
%0 = zext <4 x i8> %wide.load283.i to <4 x i32>
%1 = sub nsw <4 x i32> %0, %a
%2 = mul nsw <4 x i32> %1, %1
%predphi290.v.i = select <4 x i1> undef, <4 x i32> undef, <4 x i32> %2
store <4 x i32> %predphi290.v.i, <4 x i32>* undef, align 4
ret void
}

define void @fmul_splat(<4 x float> * %a, <4 x float>* nocapture %dst, float %tmp) nounwind {
; Look for a scalar float rather than a splat, then a vector*scalar multiply.
; CHECK: vmov s0, r2
; CHECK: vmul.f32 q8, q8, d0[0]
%tmp5 = load <4 x float>, <4 x float>* %a, align 4
%tmp6 = insertelement <4 x float> undef, float %tmp, i32 0
%tmp7 = insertelement <4 x float> %tmp6, float %tmp, i32 1
%tmp8 = insertelement <4 x float> %tmp7, float %tmp, i32 2
%tmp9 = insertelement <4 x float> %tmp8, float %tmp, i32 3
%tmp10 = fmul <4 x float> %tmp9, %tmp5
store <4 x float> %tmp10, <4 x float>* %dst, align 4
ret void
}

define void @fmul_splat_load(<4 x float> * %a, <4 x float>* nocapture %dst, float* nocapture readonly %src) nounwind {
; Look for a normal scalar FP load rather than a to-all-lanes load, then a
; vector*scalar multiply.
; FIXME: Temporarily broken due to splat representation changes.
; CHECK: vld1.32 {d18[], d19[]}, [r2:32]
; CHECK: vmul.f32 q8, q9, q8
%tmp = load float, float* %src, align 4
%tmp5 = load <4 x float>, <4 x float>* %a, align 4
%tmp6 = insertelement <4 x float> undef, float %tmp, i32 0
%tmp7 = insertelement <4 x float> %tmp6, float %tmp, i32 1
%tmp8 = insertelement <4 x float> %tmp7, float %tmp, i32 2
%tmp9 = insertelement <4 x float> %tmp8, float %tmp, i32 3
%tmp10 = fmul <4 x float> %tmp9, %tmp5
store <4 x float> %tmp10, <4 x float>* %dst, align 4
ret void
}