; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX2,X86-AVX2
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX512,X86-AVX512
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX2,X64-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX512,X64-AVX512
;
; Combine tests involving AVX target shuffles

declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8)
declare <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float>, i8)
declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8)
declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8)

declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)

declare <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8)
declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8)
declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
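; The <3,2,1,0> reversal is self-inverse, so applying it twice should fold to the identity and emit no shuffle.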
define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_identity:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %2
}
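; A <0,1,0,1> float mask duplicates the low 64 bits, matching vmovddup.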
define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_movddup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
  ret <4 x float> %1
}
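; Same combine with the source in memory; the load folds into vmovddup.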
define <4 x float> @combine_vpermilvar_4f32_movddup_load(<4 x float> *%a0) {
; X86-LABEL: combine_vpermilvar_4f32_movddup_load:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_4f32_movddup_load:
; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
  %1 = load <4 x float>, <4 x float> *%a0
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
  ret <4 x float> %2
}
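; <u,1,3,3> matches vmovshdup (duplicate odd elements); the undef index is free to become 1.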
define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_movshdup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 undef, i32 1, i32 3, i32 3>)
  ret <4 x float> %1
}
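; <0,0,2,u> matches vmovsldup (duplicate even elements).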
define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_movsldup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 undef>)
  ret <4 x float> %1
}
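; An unpckh-style self-shuffle <2,2,3,3> is emitted as a single immediate vpermilps.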
define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_unpckh:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 2, i32 2, i32 3, i32 3>)
  ret <4 x float> %1
}
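; Likewise for the unpckl-style <0,0,1,1> mask.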
define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_unpckl:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 1, i32 1>)
  ret <4 x float> %1
}
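; The second 256-bit mask inverts the first (up to an undef index), so the pair folds to the identity.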
define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_identity:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 undef>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
  ret <8 x float> %2
}
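; Two variable permutes compose into the single immediate 1,0,3,2,6,u,4,u permute named by the function.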
define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_10326u4u:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 0, i32 1, i32 2, i32 undef>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 undef>)
  ret <8 x float> %2
}
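; The per-lane permutes cancel around the lane swap, leaving just the 128-bit lane swap: vperm2f128 on AVX1, vpermpd once AVX2 is available.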
define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
; AVX1-LABEL: combine_vpermilvar_vperm2f128_8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX2-LABEL: combine_vpermilvar_vperm2f128_8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX512-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <8 x float> %1, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %3
}
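; Blending with zero folds into the lane shuffle, since vperm2f128 can zero a lane directly.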
define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <8 x float> %1, <8 x float> zeroinitializer, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %3
}
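; Zeroing the upper lane between two in-lane permutes; current codegen uses vmovapd %xmm0, %xmm0 (which implicitly clears the upper bits) rather than a vblendpd.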
define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
; CHECK-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; CHECK-NEXT: vmovapd %xmm0, %xmm0
; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  ret <4 x double> %3
}
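; 256-bit movddup: duplicate the low double of each 128-bit lane.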
define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_movddup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
  ret <8 x float> %1
}
define <8 x float> @combine_vpermilvar_8f32_movddup_load(<8 x float> *%a0) {
; X86-LABEL: combine_vpermilvar_8f32_movddup_load:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; X86-NEXT: retl
;
; X64-LABEL: combine_vpermilvar_8f32_movddup_load:
; X64: # %bb.0:
; X64-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; X64-NEXT: retq
  %1 = load <8 x float>, <8 x float> *%a0
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
  ret <8 x float> %2
}
define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_movshdup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>)
  ret <8 x float> %1
}
define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_movsldup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>)
  ret <8 x float> %1
}
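; vpermilvar.pd only uses bit 1 of each index, so <2,0> swaps the pair; two swaps cancel to the identity.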
define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_2f64_identity:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
  %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> <i64 2, i64 0>)
  ret <2 x double> %2
}
define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_2f64_movddup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 0, i64 0>)
  ret <2 x double> %1
}
define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f64_identity:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  ret <4 x double> %2
}
define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f64_movddup:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 0, i64 0, i64 4, i64 4>)
  ret <4 x double> %1
}
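; Four chained variable permutes still fold to one immediate vpermilps.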
define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_4stage:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
  %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
  %4 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %3, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %4
}
define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_4stage:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
  %4 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %3, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %4
}
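; A permute feeding a blend-with-zero combines into a single vinsertps.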
define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_as_insertps:
; CHECK: # %bb.0:
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 1, i32 4>
  ret <4 x float> %2
}
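; With constant inputs the permutes are constant-folded away, leaving a plain constant-pool load.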
define <2 x double> @constant_fold_vpermilvar_pd() {
; CHECK-LABEL: constant_fold_vpermilvar_pd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} xmm0 = [2.0E+0,1.0E+0]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> <double 1.0, double 2.0>, <2 x i64> <i64 2, i64 0>)
  ret <2 x double> %1
}
define <4 x double> @constant_fold_vpermilvar_pd_256() {
; CHECK-LABEL: constant_fold_vpermilvar_pd_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [2.0E+0,1.0E+0,3.0E+0,4.0E+0]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
  ret <4 x double> %1
}
define <4 x float> @constant_fold_vpermilvar_ps() {
; CHECK-LABEL: constant_fold_vpermilvar_ps:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} xmm0 = [4.0E+0,1.0E+0,3.0E+0,2.0E+0]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x i32> <i32 3, i32 0, i32 2, i32 1>)
  ret <4 x float> %1
}
define <8 x float> @constant_fold_vpermilvar_ps_256() {
; CHECK-LABEL: constant_fold_vpermilvar_ps_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [1.0E+0,1.0E+0,3.0E+0,2.0E+0,5.0E+0,6.0E+0,6.0E+0,6.0E+0]
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 1, i32 0, i32 1, i32 1, i32 1>)
  ret <8 x float> %1
}
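; Reduced from PR39483: a strided shuffle of an interleaved <24 x float> load; the absolute addresses and the unreachable terminator come from the original reproducer.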
define void @PR39483() {
; X86-AVX1-LABEL: PR39483:
; X86-AVX1: # %bb.0: # %entry
; X86-AVX1-NEXT: vmovups 32, %ymm0
; X86-AVX1-NEXT: vmovups 64, %xmm1
; X86-AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,3]
; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; X86-AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
; X86-AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; X86-AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X86-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX1-NEXT: vmulps %ymm1, %ymm0, %ymm0
; X86-AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X86-AVX1-NEXT: vmovups %ymm0, (%eax)
;
; X86-AVX2-LABEL: PR39483:
; X86-AVX2: # %bb.0: # %entry
; X86-AVX2-NEXT: vmovups 32, %ymm0
; X86-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X86-AVX2-NEXT: vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
; X86-AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X86-AVX2-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
; X86-AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; X86-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX2-NEXT: vmulps %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vmovups %ymm0, (%eax)
;
; X86-AVX512-LABEL: PR39483:
; X86-AVX512: # %bb.0: # %entry
; X86-AVX512-NEXT: vmovups 0, %zmm0
; X86-AVX512-NEXT: vmovups 64, %ymm1
; X86-AVX512-NEXT: vmovaps {{.*#+}} zmm2 = <2,5,8,11,14,17,20,23,u,u,u,u,u,u,u,u>
; X86-AVX512-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2
; X86-AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX512-NEXT: vmulps %ymm0, %ymm2, %ymm1
; X86-AVX512-NEXT: vaddps %ymm0, %ymm1, %ymm0
; X86-AVX512-NEXT: vmovups %ymm0, (%eax)
;
; X64-AVX1-LABEL: PR39483:
; X64-AVX1: # %bb.0: # %entry
; X64-AVX1-NEXT: vmovups 32, %ymm0
; X64-AVX1-NEXT: vmovups 64, %xmm1
; X64-AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,3]
; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; X64-AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X64-AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
; X64-AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; X64-AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; X64-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X64-AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X64-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX1-NEXT: vmulps %ymm1, %ymm0, %ymm0
; X64-AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovups %ymm0, (%rax)
;
; X64-AVX2-LABEL: PR39483:
; X64-AVX2: # %bb.0: # %entry
; X64-AVX2-NEXT: vmovups 32, %ymm0
; X64-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X64-AVX2-NEXT: vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
; X64-AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
; X64-AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; X64-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X64-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vmulps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vmovups %ymm0, (%rax)
;
; X64-AVX512-LABEL: PR39483:
; X64-AVX512: # %bb.0: # %entry
; X64-AVX512-NEXT: vmovups 0, %zmm0
; X64-AVX512-NEXT: vmovups 64, %ymm1
; X64-AVX512-NEXT: vmovaps {{.*#+}} zmm2 = <2,5,8,11,14,17,20,23,u,u,u,u,u,u,u,u>
; X64-AVX512-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2
; X64-AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT: vmulps %ymm0, %ymm2, %ymm1
; X64-AVX512-NEXT: vaddps %ymm0, %ymm1, %ymm0
; X64-AVX512-NEXT: vmovups %ymm0, (%rax)
entry:
  %wide.vec = load <24 x float>, <24 x float>* null, align 4
  %strided.vec18 = shufflevector <24 x float> %wide.vec, <24 x float> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  %0 = fmul <8 x float> %strided.vec18, zeroinitializer
  %1 = fadd <8 x float> zeroinitializer, %0
  store <8 x float> %1, <8 x float>* undef, align 16
  unreachable
}