; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s
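
; Truncating <4 x i64> to <4 x i32> should be a single cross-lane vpermd, not an extract/insert of 128-bit halves.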
; CHECK: trunc4
; CHECK: vpermd
; CHECK-NOT: vinsert
; CHECK: ret
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
  %B = trunc <4 x i64> %A to <4 x i32>
  ret <4 x i32> %B
}
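
; Truncating <8 x i32> to <8 x i16> should be done in-register with vpshufb, with no 128-bit vinsert.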
; CHECK: trunc8
; CHECK: vpshufb
; CHECK-NOT: vinsert
; CHECK: ret
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
  %B = trunc <8 x i32> %A to <8 x i16>
  ret <8 x i16> %B
}
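
; Sign extension of <4 x i32> to <4 x i64> should be a single vpmovsxdq.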
; CHECK: sext4
; CHECK: vpmovsxdq
; CHECK-NOT: vinsert
; CHECK: ret
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
  %B = sext <4 x i32> %A to <4 x i64>
  ret <4 x i64> %B
}
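
; Sign extension of <8 x i16> to <8 x i32> should be a single vpmovsxwd.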
; CHECK: sext8
; CHECK: vpmovsxwd
; CHECK-NOT: vinsert
; CHECK: ret
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
  %B = sext <8 x i16> %A to <8 x i32>
  ret <8 x i32> %B
}
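
; Zero extension of <4 x i32> to <4 x i64> should be a single vpmovzxdq.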
; CHECK: zext4
; CHECK: vpmovzxdq
; CHECK-NOT: vinsert
; CHECK: ret
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
  %B = zext <4 x i32> %A to <4 x i64>
  ret <4 x i64> %B
}
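
; Zero extension of <8 x i16> to <8 x i32> should be a single vpmovzxwd.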
; CHECK: zext8
; CHECK: vpmovzxwd
; CHECK-NOT: vinsert
; CHECK: ret
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
  %B = zext <8 x i16> %A to <8 x i32>
  ret <8 x i32> %B
}
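
; <8 x i8> is not a legal type, so this extend is expected to come out as vpmovzxwd plus a vpand that clears the bits above each byte.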
; CHECK: zext_8i8_8i32
; CHECK: vpmovzxwd
; CHECK: vpand
; CHECK: ret
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
  %B = zext <8 x i8> %A to <8 x i32>
  ret <8 x i32> %B
}
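
; zext of v16i8 to v16i16 is custom lowered so AVX2 targets get a single vpmovzxbw instead of a vpunpck/vinsertf128 sequence.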
; CHECK-LABEL: zext_16i8_16i16:
; CHECK: vpmovzxbw
; CHECK-NOT: vinsert
; CHECK: ret
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
  %t = zext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
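
; Likewise, sext of v16i8 to v16i16 should be a single vpmovsxbw.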
; CHECK-LABEL: sext_16i8_16i16:
; CHECK: vpmovsxbw
; CHECK-NOT: vinsert
; CHECK: ret
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
  %t = sext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
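
; Truncating <16 x i16> to <16 x i8> shuffles the low bytes of each half with vpshufb and joins the results with vpunpcklqdq.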
; CHECK-LABEL: trunc_16i16_16i8:
; CHECK: vpshufb
; CHECK: vpshufb
; CHECK: vpunpcklqdq
; CHECK: ret
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
  %t = trunc <16 x i16> %z to <16 x i8>
  ret <16 x i8> %t
}
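
; Sign-extending loads should fold the load into the extend: a single vpmovsxdq straight from memory.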
; CHECK: load_sext_test1
; CHECK: vpmovsxdq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test1(<4 x i32>* %ptr) {
  %X = load <4 x i32>, <4 x i32>* %ptr
  %Y = sext <4 x i32> %X to <4 x i64>
  ret <4 x i64> %Y
}
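
; The same folding applies when extending <4 x i8> from memory with vpmovsxbq.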
; CHECK: load_sext_test2
; CHECK: vpmovsxbq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test2(<4 x i8>* %ptr) {
  %X = load <4 x i8>, <4 x i8>* %ptr
  %Y = sext <4 x i8> %X to <4 x i64>
  ret <4 x i64> %Y
}
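
; And when extending <4 x i16> from memory with vpmovsxwq.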
; CHECK: load_sext_test3
; CHECK: vpmovsxwq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test3(<4 x i16>* %ptr) {
  %X = load <4 x i16>, <4 x i16>* %ptr
  %Y = sext <4 x i16> %X to <4 x i64>
  ret <4 x i64> %Y
}
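
; Extending <8 x i16> to <8 x i32> from memory should be a single vpmovsxwd.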
; CHECK: load_sext_test4
; CHECK: vpmovsxwd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test4(<8 x i16>* %ptr) {
  %X = load <8 x i16>, <8 x i16>* %ptr
  %Y = sext <8 x i16> %X to <8 x i32>
  ret <8 x i32> %Y
}
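
; Extending <8 x i8> to <8 x i32> from memory should be a single vpmovsxbd.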
; CHECK: load_sext_test5
; CHECK: vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test5(<8 x i8>* %ptr) {
  %X = load <8 x i8>, <8 x i8>* %ptr
  %Y = sext <8 x i8> %X to <8 x i32>
  ret <8 x i32> %Y
}