; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx512f -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX3
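; load_factorf64_4: factor-4 interleaved load of <16 x double>, de-interleaved into four stride-4 <4 x double> fields that are then added together.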
define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
; AVX1-LABEL: load_factorf64_4:
; AVX1: # BB#0:
; AVX1-NEXT: vmovupd (%rdi), %ymm0
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
; AVX1-NEXT: vmovupd 96(%rdi), %ymm3
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX1-NEXT: vhaddpd %ymm5, %ymm4, %ymm4
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT: vaddpd %ymm2, %ymm4, %ymm2
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: vaddpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factorf64_4:
; AVX: # BB#0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vmovupd 64(%rdi), %ymm2
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm4
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vaddpd %ymm2, %ymm4, %ymm2
; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT: vaddpd %ymm0, %ymm2, %ymm0
; AVX-NEXT: retq
  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v1 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %strided.v2 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %add1 = fadd <4 x double> %strided.v0, %strided.v1
  %add2 = fadd <4 x double> %add1, %strided.v2
  %add3 = fadd <4 x double> %add2, %strided.v3
  ret <4 x double> %add3
}
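; load_factorf64_2: same interleaved load, but only fields 0 and 3 of the stride-4 data are used and multiplied.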
define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
; AVX1-LABEL: load_factorf64_2:
; AVX1: # BB#0:
; AVX1-NEXT: vmovupd (%rdi), %ymm0
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
; AVX1-NEXT: vmovupd 96(%rdi), %ymm3
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: vmulpd %ymm0, %ymm4, %ymm0
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factorf64_2:
; AVX: # BB#0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vmovupd 64(%rdi), %ymm2
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT: vmulpd %ymm0, %ymm4, %ymm0
; AVX-NEXT: retq
  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %mul = fmul <4 x double> %strided.v0, %strided.v3
  ret <4 x double> %mul
}
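; load_factorf64_1: both shuffles select field 0, so only a single stride-4 field is needed and the result is squared.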
define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
; AVX1-LABEL: load_factorf64_1:
; AVX1: # BB#0:
; AVX1-NEXT: vmovupd (%rdi), %ymm0
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],mem[0,1]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],mem[0,1]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT: vmulpd %ymm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factorf64_1:
; AVX: # BB#0:
; AVX-NEXT: vmovupd (%rdi), %ymm0
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],mem[0,1]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],mem[0,1]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vmulpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %mul = fmul <4 x double> %strided.v0, %strided.v3
  ret <4 x double> %mul
}
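; load_factori64_4: integer variant of load_factorf64_4; four stride-4 <4 x i64> fields are summed with add.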
define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX1-LABEL: load_factori64_4:
; AVX1: # BB#0:
; AVX1-NEXT: vmovupd (%rdi), %ymm0
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
; AVX1-NEXT: vmovupd 96(%rdi), %ymm3
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX-LABEL: load_factori64_4:
; AVX: # BB#0:
; AVX-NEXT: vmovdqu (%rdi), %ymm0
; AVX-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX-NEXT: vmovdqu 64(%rdi), %ymm2
; AVX-NEXT: vmovdqu 96(%rdi), %ymm3
; AVX-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX-NEXT: vpaddq %ymm3, %ymm4, %ymm3
; AVX-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT: vpaddq %ymm0, %ymm3, %ymm0
; AVX-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX-NEXT: retq
  %wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16
  %strided.v0 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v1 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %strided.v2 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %strided.v3 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %add1 = add <4 x i64> %strided.v0, %strided.v1
  %add2 = add <4 x i64> %add1, %strided.v2
  %add3 = add <4 x i64> %add2, %strided.v3
  ret <4 x i64> %add3
}
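; store_factorf64_4: factor-4 interleaved store; four <4 x double> vectors are interleaved into a single <16 x double> store.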
define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
; AVX1-LABEL: store_factorf64_4:
; AVX1: # BB#0:
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: vmovupd %ymm0, 96(%rdi)
; AVX1-NEXT: vmovupd %ymm3, 64(%rdi)
; AVX1-NEXT: vmovupd %ymm4, 32(%rdi)
; AVX1-NEXT: vmovupd %ymm2, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_factorf64_4:
; AVX2: # BB#0:
; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT: vmovupd %ymm0, 96(%rdi)
; AVX2-NEXT: vmovupd %ymm3, 64(%rdi)
; AVX2-NEXT: vmovupd %ymm4, 32(%rdi)
; AVX2-NEXT: vmovupd %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX3-LABEL: store_factorf64_4:
; AVX3: # BB#0:
; AVX3-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX3-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX3-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX3-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX3-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX3-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX3-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX3-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX3-NEXT: vinsertf64x4 $1, %ymm4, %zmm2, %zmm1
; AVX3-NEXT: vinsertf64x4 $1, %ymm0, %zmm3, %zmm0
; AVX3-NEXT: vmovupd %zmm0, 64(%rdi)
; AVX3-NEXT: vmovupd %zmm1, (%rdi)
; AVX3-NEXT: vzeroupper
; AVX3-NEXT: retq
  %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %s1 = shufflevector <4 x double> %v2, <4 x double> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %interleaved.vec = shufflevector <8 x double> %s0, <8 x double> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
  store <16 x double> %interleaved.vec, <16 x double>* %ptr, align 16
  ret void
}
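; store_factori64_4: integer variant of store_factorf64_4 for <4 x i64> vectors.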
define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x i64> %v2, <4 x i64> %v3) {
; AVX1-LABEL: store_factori64_4:
; AVX1: # BB#0:
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: vmovupd %ymm0, 96(%rdi)
; AVX1-NEXT: vmovupd %ymm3, 64(%rdi)
; AVX1-NEXT: vmovupd %ymm4, 32(%rdi)
; AVX1-NEXT: vmovupd %ymm2, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_factori64_4:
; AVX2: # BB#0:
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm5
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT: vmovdqu %ymm0, 96(%rdi)
; AVX2-NEXT: vmovdqu %ymm3, 64(%rdi)
; AVX2-NEXT: vmovdqu %ymm4, 32(%rdi)
; AVX2-NEXT: vmovdqu %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX3-LABEL: store_factori64_4:
; AVX3: # BB#0:
; AVX3-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
; AVX3-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm5
; AVX3-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX3-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX3-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX3-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX3-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX3-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX3-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm1
; AVX3-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
; AVX3-NEXT: vmovdqu64 %zmm0, 64(%rdi)
; AVX3-NEXT: vmovdqu64 %zmm1, (%rdi)
; AVX3-NEXT: vzeroupper
; AVX3-NEXT: retq
  %s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %s1 = shufflevector <4 x i64> %v2, <4 x i64> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %interleaved.vec = shufflevector <8 x i64> %s0, <8 x i64> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
  store <16 x i64> %interleaved.vec, <16 x i64>* %ptr, align 16
  ret void
}
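; interleaved_store_vf32_i8_stride4: stride-4 interleaved store of four <32 x i8> vectors (128 bytes total).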
define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32 x i8> %x3, <32 x i8> %x4, <128 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf32_i8_stride4:
; AVX1: # BB#0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm10
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm11, %ymm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm10[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3]
; AVX1-NEXT: vmovaps %ymm0, 96(%rdi)
; AVX1-NEXT: vmovaps %ymm4, 64(%rdi)
; AVX1-NEXT: vmovaps %ymm2, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf32_i8_stride4:
; AVX2: # BB#0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm2
; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm4
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
; AVX2-NEXT: vmovdqa %ymm0, 96(%rdi)
; AVX2-NEXT: vmovdqa %ymm1, 64(%rdi)
; AVX2-NEXT: vmovdqa %ymm4, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX3-LABEL: interleaved_store_vf32_i8_stride4:
; AVX3: # BB#0:
; AVX3-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX3-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX3-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
; AVX3-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
; AVX3-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
; AVX3-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
; AVX3-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
; AVX3-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
; AVX3-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm2
; AVX3-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm4
; AVX3-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX3-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
; AVX3-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX3-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX3-NEXT: vmovdqu8 %zmm0, 64(%rdi)
; AVX3-NEXT: vmovdqu8 %zmm2, (%rdi)
; AVX3-NEXT: vzeroupper
; AVX3-NEXT: retq
  %v1 = shufflevector <32 x i8> %x1, <32 x i8> %x2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
  %v2 = shufflevector <32 x i8> %x3, <32 x i8> %x4, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
  %interleaved.vec = shufflevector <64 x i8> %v1, <64 x i8> %v2, <128 x i32> <i32 0, i32 32, i32 64, i32 96, i32 1, i32 33, i32 65, i32 97, i32 2, i32 34, i32 66, i32 98, i32 3, i32 35, i32 67, i32 99, i32 4, i32 36, i32 68, i32 100, i32 5, i32 37, i32 69, i32 101, i32 6, i32 38, i32 70, i32 102, i32 7, i32 39, i32 71, i32 103, i32 8, i32 40, i32 72, i32 104, i32 9, i32 41, i32 73, i32 105, i32 10, i32 42, i32 74, i32 106, i32 11, i32 43, i32 75, i32 107, i32 12, i32 44, i32 76, i32 108, i32 13, i32 45, i32 77, i32 109, i32 14, i32 46, i32 78, i32 110, i32 15, i32 47, i32 79, i32 111, i32 16, i32 48, i32 80, i32 112, i32 17, i32 49, i32 81, i32 113, i32 18, i32 50, i32 82, i32 114, i32 19, i32 51, i32 83, i32 115, i32 20, i32 52, i32 84, i32 116, i32 21, i32 53, i32 85, i32 117, i32 22, i32 54, i32 86, i32 118, i32 23, i32 55, i32 87, i32 119, i32 24, i32 56, i32 88, i32 120, i32 25, i32 57, i32 89, i32 121, i32 26, i32 58, i32 90, i32 122, i32 27, i32 59, i32 91, i32 123, i32 28, i32 60, i32 92, i32 124, i32 29, i32 61, i32 93, i32 125, i32 30, i32 62, i32 94, i32 126, i32 31, i32 63, i32 95, i32 127>
  store <128 x i8> %interleaved.vec, <128 x i8>* %p
  ret void
}
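; interleaved_store_vf16_i8_stride4: stride-4 interleaved store of four <16 x i8> vectors (64 bytes total).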
define void @interleaved_store_vf16_i8_stride4(<16 x i8> %x1, <16 x i8> %x2, <16 x i8> %x3, <16 x i8> %x4, <64 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf16_i8_stride4:
; AVX1: # BB#0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovaps %ymm0, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm4, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf16_i8_stride4:
; AVX2: # BB#0:
; AVX2-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = <u,u,255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255>
; AVX2-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u,u]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = <255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255,u,u>
; AVX2-NEXT: vpblendvb %ymm8, %ymm4, %ymm7, %ymm4
; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7],ymm4[8],ymm2[9],ymm4[10],ymm2[11],ymm4[12],ymm2[13],ymm4[14],ymm2[15]
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31]
; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u]
; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm6[u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u,u]
; AVX2-NEXT: vpblendvb %ymm8, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: vmovdqa %ymm0, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX3-LABEL: interleaved_store_vf16_i8_stride4:
; AVX3: # BB#0:
; AVX3-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
; AVX3-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX3-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX3-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
; AVX3-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31]
; AVX3-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
; AVX3-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u]
; AVX3-NEXT: vmovdqa {{.*#+}} ymm5 = <u,u,255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255>
; AVX3-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
; AVX3-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u]
; AVX3-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3,0,1]
; AVX3-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u,u]
; AVX3-NEXT: vmovdqa {{.*#+}} ymm8 = <255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255,u,u>
; AVX3-NEXT: vpblendvb %ymm8, %ymm4, %ymm7, %ymm4
; AVX3-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7],ymm4[8],ymm2[9],ymm4[10],ymm2[11],ymm4[12],ymm2[13],ymm4[14],ymm2[15]
; AVX3-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23]
; AVX3-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u]
; AVX3-NEXT: vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
; AVX3-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u]
; AVX3-NEXT: vpshufb {{.*#+}} ymm3 = ymm6[u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u,u]
; AVX3-NEXT: vpblendvb %ymm8, %ymm0, %ymm3, %ymm0
; AVX3-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX3-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX3-NEXT: vmovdqu8 %zmm0, (%rdi)
; AVX3-NEXT: vzeroupper
; AVX3-NEXT: retq
  %v1 = shufflevector <16 x i8> %x1, <16 x i8> %x2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %v2 = shufflevector <16 x i8> %x3, <16 x i8> %x4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %interleaved.vec = shufflevector <32 x i8> %v1, <32 x i8> %v2, <64 x i32> <i32 0,i32 16,i32 32,i32 48,i32 1,i32 17,i32 33,i32 49,i32 2,i32 18,i32 34,i32 50,i32 3,i32 19,i32 35,i32 51,i32 4,i32 20,i32 36,i32 52,i32 5,i32 21,i32 37,i32 53,i32 6,i32 22,i32 38,i32 54,i32 7,i32 23,i32 39,i32 55,i32 8,i32 24,i32 40,i32 56,i32 9,i32 25,i32 41,i32 57,i32 10,i32 26,i32 42,i32 58,i32 11,i32 27,i32 43,i32 59,i32 12,i32 28,i32 44,i32 60,i32 13,i32 29,i32 45,i32 61,i32 14,i32 30,i32 46,i32 62,i32 15,i32 31,i32 47,i32 63>
  store <64 x i8> %interleaved.vec, <64 x i8>* %p
  ret void
}
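; interleaved_load_vf8_i8_stride4: stride-4 interleaved load of four <8 x i8> fields, combined with add and mul.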
define <8 x i8> @interleaved_load_vf8_i8_stride4(<32 x i8>* %ptr) {
; AVX1-LABEL: interleaved_load_vf8_i8_stride4:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,5,5,9,9,13,13,13,13,5,5,12,12,13,13]
; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [6,7,2,3,14,15,10,11,14,15,10,11,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[1,0,3,2,4,5,6,7]
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[1,0,3,2,4,5,6,7]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [3,3,1,1,7,7,5,5,1,1,5,5,0,0,1,1]
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpmullw %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_load_vf8_i8_stride4:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm0[1,u,5,u,9,u,13,u,u,u,u,u,u,u,u,u,17,u,21,u,25,u,29,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,30,31,26,27,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,u,7,u,11,u,15,u,u,u,u,u,u,u,u,u,19,u,23,u,27,u,31,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpmullw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX3-LABEL: interleaved_load_vf8_i8_stride4:
; AVX3: # BB#0:
; AVX3-NEXT: vmovdqu (%rdi), %ymm0
; AVX3-NEXT: vpmovdw %zmm0, %ymm1
; AVX3-NEXT: vpsrlw $8, %ymm0, %ymm2
; AVX3-NEXT: vpmovdw %zmm2, %ymm2
; AVX3-NEXT: vpsrld $16, %ymm0, %ymm3
; AVX3-NEXT: vpmovdw %zmm3, %ymm3
; AVX3-NEXT: vpsrld $24, %ymm0, %ymm0
; AVX3-NEXT: vpmovdw %zmm0, %ymm0
; AVX3-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX3-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX3-NEXT: vpmullw %xmm0, %xmm1, %xmm0
; AVX3-NEXT: vzeroupper
; AVX3-NEXT: retq
  %wide.vec = load <32 x i8>, <32 x i8>* %ptr, align 16
  %v1 = shufflevector <32 x i8> %wide.vec, <32 x i8> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  %v2 = shufflevector <32 x i8> %wide.vec, <32 x i8> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  %v3 = shufflevector <32 x i8> %wide.vec, <32 x i8> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  %v4 = shufflevector <32 x i8> %wide.vec, <32 x i8> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>

  %add1 = add <8 x i8> %v1, %v2
  %add2 = add <8 x i8> %v4, %v3
  %add3 = mul <8 x i8> %add1, %add2
  ret <8 x i8> %add3
}
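; interleaved_load_vf16_i8_stride4: stride-4 interleaved load of four <16 x i8> fields, compared pairwise with icmp eq.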
|
|
|
|
|
|
|
|
define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
|
|
|
|
; AVX1-LABEL: interleaved_load_vf16_i8_stride4:
|
|
|
|
; AVX1: # BB#0:
|
|
|
|
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
|
|
|
|
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm1
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
|
|
|
|
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
|
|
|
|
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm5
|
|
|
|
; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
|
|
|
|
; AVX1-NEXT: vpand %xmm3, %xmm5, %xmm6
|
|
|
|
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm3
|
|
|
|
; AVX1-NEXT: vpackuswb %xmm6, %xmm3, %xmm3
|
|
|
|
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
|
|
|
|
; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm6
|
|
|
|
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm4
|
|
|
|
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
|
|
|
|
; AVX1-NEXT: vpshufb %xmm6, %xmm5, %xmm7
|
|
|
|
; AVX1-NEXT: vpshufb %xmm6, %xmm0, %xmm6
|
|
|
|
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
|
|
|
|
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
|
|
|
|
; AVX1-NEXT: vpcmpeqb %xmm4, %xmm3, %xmm3
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
|
|
|
|
; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm6
|
|
|
|
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm4
|
|
|
|
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
|
|
|
|
; AVX1-NEXT: vpshufb %xmm6, %xmm5, %xmm7
|
|
|
|
; AVX1-NEXT: vpshufb %xmm6, %xmm0, %xmm6
|
|
|
|
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
|
|
|
|
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
|
|
|
|
; AVX1-NEXT: vpshufb %xmm6, %xmm2, %xmm2
|
|
|
|
; AVX1-NEXT: vpshufb %xmm6, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
|
|
|
|
; AVX1-NEXT: vpshufb %xmm2, %xmm5, %xmm5
|
|
|
|
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
|
|
|
|
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
|
|
|
|
; AVX1-NEXT: vpcmpeqb %xmm0, %xmm4, %xmm0
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
|
|
|
|
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm2
|
|
|
|
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpcmpeqb %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: interleaved_load_vf16_i8_stride4:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm5
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm7
; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
; AVX2-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm6
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm7
; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: vpcmpeqb %xmm0, %xmm4, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpand %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqb %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX3-LABEL: interleaved_load_vf16_i8_stride4:
; AVX3: # BB#0:
; AVX3-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX3-NEXT: vpmovdb %zmm0, %xmm1
; AVX3-NEXT: vpsrlw $8, %zmm0, %zmm2
; AVX3-NEXT: vpmovdb %zmm2, %xmm2
; AVX3-NEXT: vpsrld $16, %zmm0, %zmm3
; AVX3-NEXT: vpmovdb %zmm3, %xmm3
; AVX3-NEXT: vpsrld $24, %zmm0, %zmm0
; AVX3-NEXT: vpmovdb %zmm0, %xmm0
; AVX3-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
; AVX3-NEXT: vpsllw $7, %xmm1, %xmm1
; AVX3-NEXT: vpmovb2m %zmm1, %k0
; AVX3-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
; AVX3-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX3-NEXT: vpmovb2m %zmm0, %k1
; AVX3-NEXT: kxnorw %k1, %k0, %k0
; AVX3-NEXT: vpmovm2b %k0, %zmm0
; AVX3-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX3-NEXT: vzeroupper
; AVX3-NEXT: retq
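; The IR below is the tested pattern: a <64 x i8> load is de-interleaved into four
; stride-4 <16 x i8> vectors which are compared pairwise, and the two <16 x i1>
; results are compared again to produce the returned mask.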
  %wide.vec = load <64 x i8>, <64 x i8>* %ptr
  %v1 = shufflevector <64 x i8> %wide.vec, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
  %v2 = shufflevector <64 x i8> %wide.vec, <64 x i8> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
  %v3 = shufflevector <64 x i8> %wide.vec, <64 x i8> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
  %v4 = shufflevector <64 x i8> %wide.vec, <64 x i8> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>

  %cmp1 = icmp eq <16 x i8> %v1, %v2
  %cmp2 = icmp eq <16 x i8> %v3, %v4
  %res = icmp eq <16 x i1> %cmp1, %cmp2

  ret <16 x i1> %res
}

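; Same pattern widened to 32 lanes: a <128 x i8> load is de-interleaved into four
; stride-4 <32 x i8> vectors and compared pairwise; the lowering is verified against
; the AVX1, AVX2, and AVX3 check prefixes.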
define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX1-LABEL: interleaved_load_vf32_i8_stride4:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm12
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm15
; AVX1-NEXT: vmovdqa 64(%rdi), %ymm3
; AVX1-NEXT: vmovdqa 96(%rdi), %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm11
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX1-NEXT: vpand %xmm5, %xmm11, %xmm2
; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm6
; AVX1-NEXT: vpackuswb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm13
; AVX1-NEXT: vpand %xmm5, %xmm13, %xmm7
; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm0
; AVX1-NEXT: vpackuswb %xmm7, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm14
; AVX1-NEXT: vpand %xmm5, %xmm14, %xmm0
; AVX1-NEXT: vpand %xmm5, %xmm15, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm2
; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm6
; AVX1-NEXT: vpand %xmm5, %xmm12, %xmm5
; AVX1-NEXT: vpackuswb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm0, %xmm5, %xmm9
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm0, %xmm11, %xmm5
; AVX1-NEXT: vpshufb %xmm0, %xmm4, %xmm6
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm6, %xmm13, %xmm7
; AVX1-NEXT: vpshufb %xmm6, %xmm3, %xmm1
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm5
; AVX1-NEXT: vpshufb %xmm0, %xmm15, %xmm0
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; AVX1-NEXT: vpshufb %xmm6, %xmm2, %xmm5
; AVX1-NEXT: vpshufb %xmm6, %xmm12, %xmm6
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm5[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vblendpd {{.*#+}} ymm10 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm0, %xmm11, %xmm1
; AVX1-NEXT: vpshufb %xmm0, %xmm4, %xmm5
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm5, %xmm13, %xmm6
; AVX1-NEXT: vpshufb %xmm5, %xmm3, %xmm7
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm6
; AVX1-NEXT: vpshufb %xmm0, %xmm15, %xmm0
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; AVX1-NEXT: vpshufb %xmm5, %xmm2, %xmm6
; AVX1-NEXT: vpshufb %xmm5, %xmm12, %xmm5
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm5[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm1, %xmm11, %xmm5
; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm4
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm5, %xmm13, %xmm6
; AVX1-NEXT: vpshufb %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX1-NEXT: vpshufb %xmm1, %xmm14, %xmm4
; AVX1-NEXT: vpshufb %xmm1, %xmm15, %xmm1
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; AVX1-NEXT: vpshufb %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm5, %xmm12, %xmm4
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
; AVX1-NEXT: vpcmpeqb %xmm10, %xmm9, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm3
; AVX1-NEXT: vpcmpeqb %xmm3, %xmm8, %xmm3
; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [72340172838076673,72340172838076673]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm3
; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpcmpeqb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_load_vf32_i8_stride4:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm9
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm11
; AVX2-NEXT: vmovdqa 64(%rdi), %ymm14
; AVX2-NEXT: vmovdqa 96(%rdi), %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm5
; AVX2-NEXT: vpshufb %ymm4, %ymm14, %ymm7
; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX2-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
; AVX2-NEXT: vpshufb %ymm4, %ymm11, %ymm7
; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX2-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX2-NEXT: vpshufb %ymm4, %ymm9, %ymm4
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm8
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm10
; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm7, %xmm10, %xmm5
; AVX2-NEXT: vpshufb %xmm7, %xmm3, %xmm6
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm6
; AVX2-NEXT: vextracti128 $1, %ymm14, %xmm13
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm0, %xmm13, %xmm4
; AVX2-NEXT: vpshufb %xmm0, %xmm14, %xmm1
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm12 = ymm1[0,1,2,3,4,5],ymm6[6,7]
; AVX2-NEXT: vextracti128 $1, %ymm11, %xmm6
; AVX2-NEXT: vpshufb %xmm7, %xmm6, %xmm4
; AVX2-NEXT: vpshufb %xmm7, %xmm11, %xmm7
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
; AVX2-NEXT: vextracti128 $1, %ymm9, %xmm7
; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm1
; AVX2-NEXT: vpshufb %xmm0, %xmm9, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
; AVX2-NEXT: vpcmpeqb %ymm0, %ymm8, %ymm8
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm0, %xmm10, %xmm1
; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm13, %xmm5
; AVX2-NEXT: vpshufb %xmm4, %xmm14, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm2
; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; AVX2-NEXT: vpshufb %xmm4, %xmm7, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm9, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm1, %xmm10, %xmm2
; AVX2-NEXT: vpshufb %xmm1, %xmm3, %xmm3
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm3, %xmm13, %xmm4
; AVX2-NEXT: vpshufb %xmm3, %xmm14, %xmm5
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
; AVX2-NEXT: vpshufb %xmm1, %xmm6, %xmm4
; AVX2-NEXT: vpshufb %xmm1, %xmm11, %xmm1
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; AVX2-NEXT: vpshufb %xmm3, %xmm7, %xmm4
; AVX2-NEXT: vpshufb %xmm3, %xmm9, %xmm3
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpand %ymm1, %ymm8, %ymm2
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpeqb %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX3-LABEL: interleaved_load_vf32_i8_stride4:
; AVX3: # BB#0:
; AVX3-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX3-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX3-NEXT: vpmovdw %zmm0, %ymm2
; AVX3-NEXT: vpmovdw %zmm1, %ymm3
; AVX3-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX3-NEXT: vpmovwb %zmm2, %ymm8
; AVX3-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX3-NEXT: vextracti64x4 $1, %zmm1, %ymm14
; AVX3-NEXT: vextracti128 $1, %ymm14, %xmm9
; AVX3-NEXT: vpshufb %xmm7, %xmm9, %xmm4
; AVX3-NEXT: vpshufb %xmm7, %xmm14, %xmm5
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm5
; AVX3-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX3-NEXT: vextracti128 $1, %ymm1, %xmm10
; AVX3-NEXT: vpshufb %xmm3, %xmm10, %xmm6
; AVX3-NEXT: vpshufb %xmm3, %xmm1, %xmm4
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX3-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX3-NEXT: vextracti64x4 $1, %zmm0, %ymm5
; AVX3-NEXT: vextracti128 $1, %ymm5, %xmm12
; AVX3-NEXT: vpshufb %xmm7, %xmm12, %xmm4
; AVX3-NEXT: vpshufb %xmm7, %xmm5, %xmm7
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
; AVX3-NEXT: vextracti128 $1, %ymm0, %xmm13
; AVX3-NEXT: vpshufb %xmm3, %xmm13, %xmm6
; AVX3-NEXT: vpshufb %xmm3, %xmm0, %xmm3
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
; AVX3-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
; AVX3-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm11[4,5,6,7]
; AVX3-NEXT: vpcmpeqb %ymm3, %ymm8, %ymm8
; AVX3-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
; AVX3-NEXT: vpshufb %xmm3, %xmm9, %xmm4
; AVX3-NEXT: vpshufb %xmm3, %xmm14, %xmm6
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX3-NEXT: vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX3-NEXT: vpshufb %xmm6, %xmm10, %xmm7
; AVX3-NEXT: vpshufb %xmm6, %xmm1, %xmm2
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
; AVX3-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX3-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
; AVX3-NEXT: vpshufb %xmm3, %xmm12, %xmm4
; AVX3-NEXT: vpshufb %xmm3, %xmm5, %xmm3
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX3-NEXT: vpshufb %xmm6, %xmm13, %xmm4
; AVX3-NEXT: vpshufb %xmm6, %xmm0, %xmm6
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX3-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
; AVX3-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX3-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
; AVX3-NEXT: vpshufb %xmm3, %xmm9, %xmm4
; AVX3-NEXT: vpshufb %xmm3, %xmm14, %xmm6
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX3-NEXT: vmovdqa {{.*#+}} xmm6 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX3-NEXT: vpshufb %xmm6, %xmm10, %xmm7
; AVX3-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
; AVX3-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX3-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
; AVX3-NEXT: vpshufb %xmm3, %xmm12, %xmm4
; AVX3-NEXT: vpshufb %xmm3, %xmm5, %xmm3
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX3-NEXT: vpshufb %xmm6, %xmm13, %xmm4
; AVX3-NEXT: vpshufb %xmm6, %xmm0, %xmm0
; AVX3-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX3-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3]
; AVX3-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX3-NEXT: vpcmpeqb %ymm0, %ymm2, %ymm0
; AVX3-NEXT: vpsllw $7, %ymm8, %ymm1
; AVX3-NEXT: vpmovb2m %zmm1, %k0
; AVX3-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX3-NEXT: vpmovb2m %zmm0, %k1
; AVX3-NEXT: kxnord %k1, %k0, %k0
; AVX3-NEXT: vpmovm2b %k0, %zmm0
; AVX3-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX3-NEXT: retq
  %wide.vec = load <128 x i8>, <128 x i8>* %ptr
  %v1 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124>

  %v2 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125>

  %v3 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126>

  %v4 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127>

  %cmp1 = icmp eq <32 x i8> %v1, %v2
  %cmp2 = icmp eq <32 x i8> %v3, %v4
  %res = icmp eq <32 x i1> %cmp1, %cmp2

  ret <32 x i1> %res
}