llvm-project/llvm/test/CodeGen/X86/vector-rotate-512.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
;
; Variable Rotates
;
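; These functions build a rotate from the (shl %a, %b) | (lshr %a, w-%b)
; idiom. AVX512F provides native variable rotates only for 32/64-bit
; elements (vprolvd/vprolvq); 16/8-bit rotates must be emulated, with
; AVX512BW making the expansion considerably cheaper.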
define <8 x i64> @var_rotate_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; AVX512-LABEL: var_rotate_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%b64 = sub <8 x i64> <i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64>, %b
%shl = shl <8 x i64> %a, %b
%lshr = lshr <8 x i64> %a, %b64
%or = or <8 x i64> %shl, %lshr
ret <8 x i64> %or
}
define <16 x i32> @var_rotate_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; AVX512-LABEL: var_rotate_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%b32 = sub <16 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %b
%shl = shl <16 x i32> %a, %b
%lshr = lshr <16 x i32> %a, %b32
%or = or <16 x i32> %shl, %lshr
ret <16 x i32> %or
}
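; Without AVX512BW there are no variable word shifts, so v32i16 is split
; into two 256-bit halves that are zero-extended to dwords, shifted with
; vpsllvd/vpsrlvd and truncated back via vpmovdw. AVX512BW can instead use
; the native word shifts vpsllvw/vpsrlvw on the full 512-bit vector.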
define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512F-LABEL: var_rotate_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpsllvd %zmm4, %zmm0, %zmm4
; AVX512F-NEXT: vpmovdw %zmm4, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpsllvd %zmm2, %zmm1, %zmm2
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vpsubw %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512F-NEXT: vpsrlvd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpsllvd %zmm4, %zmm0, %zmm4
; AVX512VL-NEXT: vpmovdw %zmm4, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-NEXT: vpsllvd %zmm2, %zmm1, %zmm2
; AVX512VL-NEXT: vpmovdw %zmm2, %ymm2
; AVX512VL-NEXT: vpsubw %ymm3, %ymm5, %ymm3
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm3, %zmm1, %zmm1
; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v32i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%b16 = sub <32 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
%shl = shl <32 x i16> %a, %b
%lshr = lshr <32 x i16> %a, %b16
%or = or <32 x i16> %shl, %lshr
ret <32 x i16> %or
}
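; There are no variable byte shifts at all, so v64i8 exercises the bitwise
; amount decomposition: vpsllw $5 moves each amount bit into the byte sign
; bit, and a ladder of blends conditionally applies rotates by 4, 2 and 1
; (vpblendvb on the 256-bit halves; vpmovb2m plus masked moves on AVX512BW).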
define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: var_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm9, %ymm8, %ymm8
; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm9, %ymm8, %ymm8
; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm3
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
; AVX512BW-NEXT: vpsllw $2, %zmm3, %zmm4
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
; AVX512BW-NEXT: vpsllw $5, %zmm2, %zmm1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k2
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm3
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
; AVX512VLBW-NEXT: vpsllw $5, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
; AVX512VLBW-NEXT: vpsllw $2, %zmm3, %zmm4
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
; AVX512VLBW-NEXT: vpsllw $5, %zmm2, %zmm1
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k2
; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
; AVX512VLBW-NEXT: vpsrlw $2, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512VLBW-NEXT: vpsrlw $1, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512VLBW-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512VLBW-NEXT: retq
%b8 = sub <64 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
%shl = shl <64 x i8> %a, %b
%lshr = lshr <64 x i8> %a, %b8
%or = or <64 x i8> %shl, %lshr
ret <64 x i8> %or
}
;
; Uniform Variable Rotates
;
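; The rotate amount is splatted from element 0 of %b. For 32/64-bit
; elements a broadcast feeds vprolvd/vprolvq; for the narrower types the
; scalar amount can instead drive the uniform-count vpsllw/vpsrlw forms.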
define <8 x i64> @splatvar_rotate_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; AVX512-LABEL: splatvar_rotate_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastq %xmm1, %zmm1
; AVX512-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
%splat64 = sub <8 x i64> <i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64, i64 64>, %splat
%shl = shl <8 x i64> %a, %splat
%lshr = lshr <8 x i64> %a, %splat64
%or = or <8 x i64> %shl, %lshr
ret <8 x i64> %or
}
define <16 x i32> @splatvar_rotate_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; AVX512-LABEL: splatvar_rotate_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %xmm1, %zmm1
; AVX512-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
%splat32 = sub <16 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %splat
%shl = shl <16 x i32> %a, %splat
%lshr = lshr <16 x i32> %a, %splat32
%or = or <16 x i32> %shl, %lshr
ret <16 x i32> %or
}
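; The splat amount is zero-extended with vpmovzxwq because vpsllw/vpsrlw
; take their count from the low 64 bits of the xmm operand.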
define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm3
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm3, %xmm5, %xmm3
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX512F-NEXT: vpsllw %xmm2, %ymm1, %ymm2
; AVX512F-NEXT: vpsrlw %xmm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm3
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm3, %xmm5, %xmm3
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX512VL-NEXT: vpsllw %xmm2, %ymm1, %ymm2
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm2, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v32i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm2, %zmm0, %zmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512VLBW-NEXT: retq
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%splat16 = sub <32 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat
%shl = shl <32 x i16> %a, %splat
%lshr = lshr <32 x i16> %a, %splat16
%or = or <32 x i16> %shl, %lshr
ret <32 x i16> %or
}
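; On AVX512BW a uniform byte shift is built from a word shift plus a mask
; that clears the bits carried across byte boundaries; the mask is computed
; at run time by applying the same shift to an all-ones vector (materialized
; with vpternlogd $255) and broadcasting the resulting byte. Without
; AVX512BW the generic blend ladder is reused on each 256-bit half.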
define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpor %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm7
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm8, %ymm7, %ymm7
; AVX512F-NEXT: vpor %ymm3, %ymm7, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm7
; AVX512F-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm9, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512F-NEXT: vpor %ymm3, %ymm10, %ymm3
; AVX512F-NEXT: vpaddb %ymm7, %ymm7, %ymm10
; AVX512F-NEXT: vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm3
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm3
; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm3
; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm5
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512VL-NEXT: vpor %ymm3, %ymm5, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm7
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm8, %ymm7, %ymm7
; AVX512VL-NEXT: vpor %ymm3, %ymm7, %ymm3
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm7
; AVX512VL-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm9, %ymm3, %ymm3
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512VL-NEXT: vpor %ymm3, %ymm10, %ymm3
; AVX512VL-NEXT: vpaddb %ymm7, %ymm7, %ymm10
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm3
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm3, %ymm4, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm3
; AVX512VL-NEXT: vpand %ymm8, %ymm3, %ymm3
; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm3
; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm2
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm3
; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
; AVX512BW-NEXT: vpsllw %xmm1, %zmm4, %zmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm3, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw %xmm2, %zmm4, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm2
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm1, %zmm0, %zmm3
; AVX512VLBW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
; AVX512VLBW-NEXT: vpsllw %xmm1, %zmm4, %zmm1
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512VLBW-NEXT: vpandq %zmm1, %zmm3, %zmm1
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpsrlw %xmm2, %zmm4, %zmm2
; AVX512VLBW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VLBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%splat8 = sub <64 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
%shl = shl <64 x i8> %a, %splat
%lshr = lshr <64 x i8> %a, %splat8
%or = or <64 x i8> %shl, %lshr
ret <64 x i8> %or
}
;
; Constant Rotates
;
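; Constant rotate amounts are folded into constant-pool memory operands,
; shown as {{.*}}(%rip) in the checks below.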
define <8 x i64> @constant_rotate_v8i64(<8 x i64> %a) nounwind {
; AVX512-LABEL: constant_rotate_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vprolvq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <8 x i64> %a, <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>
%lshr = lshr <8 x i64> %a, <i64 60, i64 50, i64 14, i64 4, i64 60, i64 50, i64 14, i64 4>
%or = or <8 x i64> %shl, %lshr
ret <8 x i64> %or
}
define <16 x i32> @constant_rotate_v16i32(<16 x i32> %a) nounwind {
; AVX512-LABEL: constant_rotate_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vprolvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
%lshr = lshr <16 x i32> %a, <i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21>
%or = or <16 x i32> %shl, %lshr
ret <16 x i32> %or
}
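; Without AVX512BW a word rotate by a constant c is done with multiplies:
; vpmullw yields the low half (x << c) and vpmulhuw the high half of
; x * 2^c, which equals x >> (16 - c).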
define <32 x i16> @constant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: constant_rotate_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
; AVX512F-NEXT: vpmulhuw %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpmulhuw %ymm2, %ymm1, %ymm3
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
; AVX512VL-NEXT: vpmulhuw %ymm2, %ymm0, %ymm3
; AVX512VL-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpmulhuw %ymm2, %ymm1, %ymm3
; AVX512VL-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v32i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%shl = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%lshr = lshr <32 x i16> %a, <i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1>
%or = or <32 x i16> %shl, %lshr
ret <32 x i16> %or
}
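; For constant byte rotates on AVX512BW, the shl half uses the blend/mask
; ladder keyed off the [8192,24640,...] amount pattern, while the lshr half
; is done at word granularity: unpack the bytes to words, shift with
; vpsllvw, and narrow back with vpsrlw $8 plus vpackuswb.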
define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: constant_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm7
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm8, %ymm7, %ymm7
; AVX512F-NEXT: vpor %ymm2, %ymm7, %ymm2
; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm7
; AVX512F-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512F-NEXT: vpor %ymm2, %ymm10, %ymm2
; AVX512F-NEXT: vpaddb %ymm7, %ymm7, %ymm10
; AVX512F-NEXT: vpblendvb %ymm10, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm3
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm3
; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm3
; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
; AVX512VL-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm7
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm8, %ymm7, %ymm7
; AVX512VL-NEXT: vpor %ymm2, %ymm7, %ymm2
; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm7
; AVX512VL-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512VL-NEXT: vpor %ymm2, %ymm10, %ymm2
; AVX512VL-NEXT: vpaddb %ymm7, %ymm7, %ymm10
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm3
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm3
; AVX512VL-NEXT: vpand %ymm8, %ymm3, %ymm3
; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm3
; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpsllw $2, %zmm2, %zmm3
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm2
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
; AVX512VLBW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k1}
; AVX512VLBW-NEXT: vpsllw $2, %zmm2, %zmm3
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLBW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512VLBW-NEXT: retq
%shl = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
%lshr = lshr <64 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
%or = or <64 x i8> %shl, %lshr
ret <64 x i8> %or
}
;
; Uniform Constant Rotates
;
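; A uniform constant amount becomes a single immediate: vprolq/vprold for
; 64/32-bit elements, a vpsllw/vpsrlw immediate pair for words, and a
; shift-plus-mask sequence for bytes.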
define <8 x i64> @splatconstant_rotate_v8i64(<8 x i64> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vprolq $14, %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <8 x i64> %a, <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>
%lshr = lshr <8 x i64> %a, <i64 50, i64 50, i64 50, i64 50, i64 50, i64 50, i64 50, i64 50>
%or = or <8 x i64> %shl, %lshr
ret <8 x i64> %or
}
define <16 x i32> @splatconstant_rotate_v16i32(<16 x i32> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vprold $4, %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%lshr = lshr <16 x i32> %a, <i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28>
%or = or <16 x i32> %shl, %lshr
ret <16 x i32> %or
}
define <32 x i16> @splatconstant_rotate_v32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $9, %ymm0, %ymm2
; AVX512F-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $9, %ymm1, %ymm2
; AVX512F-NEXT: vpsllw $7, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $9, %ymm0, %ymm2
; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $9, %ymm1, %ymm2
; AVX512VL-NEXT: vpsllw $7, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlw $9, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_v32i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $7, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlw $9, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%shl = shl <32 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%lshr = lshr <32 x i16> %a, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
%or = or <32 x i16> %shl, %lshr
ret <32 x i16> %or
}
define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%shl = shl <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
%lshr = lshr <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
%or = or <64 x i8> %shl, %lshr
ret <64 x i8> %or
}
;
; Masked Uniform Constant Rotates
;
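; These verify that rotate matching still survives masking of the shifted
; halves: where known bits allow, the two masks combine into a single vpand
; after the rotate; otherwise each half is masked before the final or.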
define <8 x i64> @splatconstant_rotate_mask_v8i64(<8 x i64> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_mask_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <8 x i64> %a, <i64 15, i64 15, i64 15, i64 15, i64 15, i64 15, i64 15, i64 15>
%lshr = lshr <8 x i64> %a, <i64 49, i64 49, i64 49, i64 49, i64 49, i64 49, i64 49, i64 49>
%rmask = and <8 x i64> %lshr, <i64 255, i64 127, i64 127, i64 255, i64 255, i64 127, i64 127, i64 255>
%lmask = and <8 x i64> %shl, <i64 33, i64 65, i64 129, i64 257, i64 33, i64 65, i64 129, i64 257>
%or = or <8 x i64> %lmask, %rmask
ret <8 x i64> %or
}
define <16 x i32> @splatconstant_rotate_mask_v16i32(<16 x i32> %a) nounwind {
; AVX512-LABEL: splatconstant_rotate_mask_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vprold $4, %zmm0, %zmm0
; AVX512-NEXT: vpandd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%lshr = lshr <16 x i32> %a, <i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28>
%rmask = and <16 x i32> %lshr, <i32 3, i32 7, i32 15, i32 31, i32 63, i32 127, i32 255, i32 511, i32 3, i32 7, i32 15, i32 31, i32 63, i32 127, i32 255, i32 511>
%lmask = and <16 x i32> %shl, <i32 511, i32 255, i32 127, i32 63, i32 31, i32 15, i32 7, i32 3, i32 511, i32 255, i32 127, i32 63, i32 31, i32 15, i32 7, i32 3>
%or = or <16 x i32> %lmask, %rmask
ret <16 x i32> %or
}
define <32 x i16> @splatconstant_rotate_mask_v32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_mask_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
; AVX512F-NEXT: vpsrlw $11, %ymm0, %ymm3
; AVX512F-NEXT: vpsllw $5, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $11, %ymm1, %ymm3
; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
; AVX512VL-NEXT: vpsrlw $11, %ymm0, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $11, %ymm1, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $5, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlw $11, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_mask_v32i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $5, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlw $11, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%shl = shl <32 x i16> %a, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
%lshr = lshr <32 x i16> %a, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
%rmask = and <32 x i16> %lshr, <i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55>
%lmask = and <32 x i16> %shl, <i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33>
%or = or <32 x i16> %lmask, %rmask
ret <32 x i16> %or
}
define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm5
; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm5
; AVX512VL-NEXT: vpand %ymm3, %ymm5, %ymm3
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%shl = shl <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
%lshr = lshr <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
%rmask = and <64 x i8> %lshr, <i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55>
%lmask = and <64 x i8> %shl, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
%or = or <64 x i8> %lmask, %rmask
ret <64 x i8> %or
}