; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X32,X32-SLOW
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=X32,X32-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=X64,X64-FAST

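; trunc4: truncate <4 x i64> to <4 x i32>. With +fast-variable-shuffle this is
; a single vpermps; otherwise vpermilps followed by vpermpd.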
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32-SLOW-LABEL: trunc4:
; X32-SLOW: # %bb.0:
; X32-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-SLOW-NEXT: vzeroupper
; X32-SLOW-NEXT: retl
;
; X32-FAST-LABEL: trunc4:
; X32-FAST: # %bb.0:
; X32-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; X32-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-FAST-NEXT: vzeroupper
; X32-FAST-NEXT: retl
;
; X64-SLOW-LABEL: trunc4:
; X64-SLOW: # %bb.0:
; X64-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-SLOW-NEXT: vzeroupper
; X64-SLOW-NEXT: retq
;
; X64-FAST-LABEL: trunc4:
; X64-FAST: # %bb.0:
; X64-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; X64-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-FAST-NEXT: vzeroupper
; X64-FAST-NEXT: retq
  %B = trunc <4 x i64> %A to <4 x i32>
  ret <4 x i32>%B
}
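; trunc8: truncate <8 x i32> to <8 x i16> via vpshufb and vpermq on both targets.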
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-LABEL: trunc8:
; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: trunc8:
; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
  %B = trunc <8 x i32> %A to <8 x i16>
  ret <8 x i16>%B
}
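; sext4: sign extend <4 x i32> to <4 x i64> with a single vpmovsxdq.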
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
; X32-LABEL: sext4:
; X32: # %bb.0:
; X32-NEXT: vpmovsxdq %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext4:
; X64: # %bb.0:
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: retq
  %B = sext <4 x i32> %A to <4 x i64>
  ret <4 x i64>%B
}
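; sext8: sign extend <8 x i16> to <8 x i32> with a single vpmovsxwd.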
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
; X32-LABEL: sext8:
; X32: # %bb.0:
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext8:
; X64: # %bb.0:
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: retq
  %B = sext <8 x i16> %A to <8 x i32>
  ret <8 x i32>%B
}
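; zext4: zero extend <4 x i32> to <4 x i64> with a single vpmovzxdq.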
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
; X32-LABEL: zext4:
; X32: # %bb.0:
; X32-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: zext4:
; X64: # %bb.0:
; X64-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
  %B = zext <4 x i32> %A to <4 x i64>
  ret <4 x i64>%B
}
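; zext8: zero extend <8 x i16> to <8 x i32> with a single vpmovzxwd.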
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
; X32-LABEL: zext8:
; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext8:
; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
  %B = zext <8 x i16> %A to <8 x i32>
  ret <8 x i32>%B
}
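; zext_8i8_8i32: the <8 x i8> argument is passed widened to one byte per 16-bit
; lane, so the high bits of each lane are masked off with vpand before the
; vpmovzxwd.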
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
; X32-LABEL: zext_8i8_8i32:
; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_8i8_8i32:
; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
  %B = zext <8 x i8> %A to <8 x i32>
  ret <8 x i32>%B
}
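; zext_16i8_16i16: with AVX2 this is a single vpmovzxbw (r193262 / PR17654;
; pre-AVX2 targets instead get a vpxor + vpunpcklbw/vpunpckhbw + vinsertf128
; sequence).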
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: zext_16i8_16i16:
; X32: # %bb.0:
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_16i8_16i16:
; X64: # %bb.0:
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT: retq
  %t = zext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
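; sext_16i8_16i16: sign extend <16 x i8> to <16 x i16> with a single vpmovsxbw.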
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: sext_16i8_16i16:
; X32: # %bb.0:
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_16i8_16i16:
; X64: # %bb.0:
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: retq
  %t = sext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
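; trunc_16i16_16i8: AVX2 has no single-instruction byte truncate, so the upper
; half is extracted, both halves are vpshufb'd down to their low bytes, and the
; results are recombined with vpunpcklqdq.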
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
; X32: # %bb.0:
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X32-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X32-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; X32-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: trunc_16i16_16i8:
; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X64-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X64-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: vzeroupper
; X64-NEXT: retq
  %t = trunc <16 x i16> %z to <16 x i8>
  ret <16 x i8> %t
}
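; The load_sext tests check that a sign-extending load is folded into the
; memory form of the matching vpmovsx* instruction.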
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
; X32-LABEL: load_sext_test1:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test1:
; X64: # %bb.0:
; X64-NEXT: vpmovsxdq (%rdi), %ymm0
; X64-NEXT: retq
  %X = load <4 x i32>, <4 x i32>* %ptr
  %Y = sext <4 x i32> %X to <4 x i64>
  ret <4 x i64>%Y
}
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
; X32-LABEL: load_sext_test2:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test2:
; X64: # %bb.0:
; X64-NEXT: vpmovsxbq (%rdi), %ymm0
; X64-NEXT: retq
  %X = load <4 x i8>, <4 x i8>* %ptr
  %Y = sext <4 x i8> %X to <4 x i64>
  ret <4 x i64>%Y
}
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
; X32-LABEL: load_sext_test3:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test3:
; X64: # %bb.0:
; X64-NEXT: vpmovsxwq (%rdi), %ymm0
; X64-NEXT: retq
  %X = load <4 x i16>, <4 x i16>* %ptr
  %Y = sext <4 x i16> %X to <4 x i64>
  ret <4 x i64>%Y
}
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
; X32-LABEL: load_sext_test4:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test4:
; X64: # %bb.0:
; X64-NEXT: vpmovsxwd (%rdi), %ymm0
; X64-NEXT: retq
  %X = load <8 x i16>, <8 x i16>* %ptr
  %Y = sext <8 x i16> %X to <8 x i32>
  ret <8 x i32>%Y
}
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
; X32-LABEL: load_sext_test5:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test5:
; X64: # %bb.0:
; X64-NEXT: vpmovsxbd (%rdi), %ymm0
; X64-NEXT: retq
  %X = load <8 x i8>, <8 x i8>* %ptr
  %Y = sext <8 x i8> %X to <8 x i32>
  ret <8 x i32>%Y
}