From 1c91e63897d8832a7b634d9662b8dbcc3ba36efd Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 17 Jun 2019 14:38:17 +0000
Subject: [PATCH] [X86][SSE] Add tests for underaligned nt loads

Test both 'unaligned' (for which we should just use regular unaligned loads) and 'subvector aligned' (which we should split)

llvm-svn: 363565
---
 llvm/test/CodeGen/X86/nontemporal-loads-2.ll | 1395 ++++++++++++++++++
 1 file changed, 1395 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/nontemporal-loads-2.ll

diff --git a/llvm/test/CodeGen/X86/nontemporal-loads-2.ll b/llvm/test/CodeGen/X86/nontemporal-loads-2.ll
new file mode 100644
index 000000000000..0441d3a4922e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/nontemporal-loads-2.ll
@@ -0,0 +1,1395 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
+
+; Test codegen for underaligned nontemporal vector loads
+
+; XMM versions.
+
+define <2 x double> @test_v2f64_align1(<2 x double>* %src) nounwind {
+; SSE-LABEL: test_v2f64_align1:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v2f64_align1:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovups (%rdi), %xmm0
+; AVX-NEXT: retq
+ %1 = load <2 x double>, <2 x double>* %src, align 1, !nontemporal !1
+ ret <2 x double> %1
+}
+
+define <4 x float> @test_v4f32_align1(<4 x float>* %src) nounwind {
+; SSE-LABEL: test_v4f32_align1:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4f32_align1:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovups (%rdi), %xmm0
+; AVX-NEXT: retq
+ %1 = load <4 x float>, <4 x float>* %src, align 1, !nontemporal !1
+ ret <4 x float> %1
+}
+
+define <2 x i64> @test_v2i64_align1(<2 x i64>* %src) nounwind {
+; SSE-LABEL: test_v2i64_align1:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v2i64_align1:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovups (%rdi), %xmm0
+; AVX-NEXT: retq
+ %1 = load <2 x i64>, <2 x i64>* %src, align 1, !nontemporal !1
+ ret <2 x i64> %1
+}
+
+define <4 x i32> @test_v4i32_align1(<4 x i32>* %src) nounwind {
+; SSE-LABEL: test_v4i32_align1:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4i32_align1:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovups (%rdi), %xmm0
+; AVX-NEXT: retq
+ %1 = load <4 x i32>, <4 x i32>* %src, align 1, !nontemporal !1
+ ret <4 x i32> %1
+}
+
+define <8 x i16> @test_v8i16_align1(<8 x i16>* %src) nounwind {
+; SSE-LABEL: test_v8i16_align1:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8i16_align1:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovups (%rdi), %xmm0
+; AVX-NEXT: retq
+ %1 = load <8 x i16>, <8 x i16>* %src, align 1, !nontemporal !1
+ ret <8 x i16> %1
+}
+
+define <16 x i8> @test_v16i8_align1(<16 x i8>* %src) nounwind {
+; SSE-LABEL: test_v16i8_align1:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; 
SSE-NEXT: retq +; +; AVX-LABEL: test_v16i8_align1: +; AVX: # %bb.0: +; AVX-NEXT: vmovups (%rdi), %xmm0 +; AVX-NEXT: retq + %1 = load <16 x i8>, <16 x i8>* %src, align 1, !nontemporal !1 + ret <16 x i8> %1 +} + +; YMM versions. + +define <4 x double> @test_v4f64_align1(<4 x double>* %src) nounwind { +; SSE-LABEL: test_v4f64_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: test_v4f64_align1: +; AVX: # %bb.0: +; AVX-NEXT: vmovups (%rdi), %ymm0 +; AVX-NEXT: retq + %1 = load <4 x double>, <4 x double>* %src, align 1, !nontemporal !1 + ret <4 x double> %1 +} + +define <8 x float> @test_v8f32_align1(<8 x float>* %src) nounwind { +; SSE-LABEL: test_v8f32_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: test_v8f32_align1: +; AVX: # %bb.0: +; AVX-NEXT: vmovups (%rdi), %ymm0 +; AVX-NEXT: retq + %1 = load <8 x float>, <8 x float>* %src, align 1, !nontemporal !1 + ret <8 x float> %1 +} + +define <4 x i64> @test_v4i64_align1(<4 x i64>* %src) nounwind { +; SSE-LABEL: test_v4i64_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: test_v4i64_align1: +; AVX: # %bb.0: +; AVX-NEXT: vmovups (%rdi), %ymm0 +; AVX-NEXT: retq + %1 = load <4 x i64>, <4 x i64>* %src, align 1, !nontemporal !1 + ret <4 x i64> %1 +} + +define <8 x i32> @test_v8i32_align1(<8 x i32>* %src) nounwind { +; SSE-LABEL: test_v8i32_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: test_v8i32_align1: +; AVX: # %bb.0: +; AVX-NEXT: vmovups (%rdi), %ymm0 +; AVX-NEXT: retq + %1 = load <8 x i32>, <8 x i32>* %src, align 1, !nontemporal !1 + ret <8 x i32> %1 +} + +define <16 x i16> @test_v16i16_align1(<16 x i16>* %src) nounwind { +; SSE-LABEL: test_v16i16_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: test_v16i16_align1: +; AVX: # %bb.0: +; AVX-NEXT: vmovups (%rdi), %ymm0 +; AVX-NEXT: retq + %1 = load <16 x i16>, <16 x i16>* %src, align 1, !nontemporal !1 + ret <16 x i16> %1 +} + +define <32 x i8> @test_v32i8_align1(<32 x i8>* %src) nounwind { +; SSE-LABEL: test_v32i8_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: test_v32i8_align1: +; AVX: # %bb.0: +; AVX-NEXT: vmovups (%rdi), %ymm0 +; AVX-NEXT: retq + %1 = load <32 x i8>, <32 x i8>* %src, align 1, !nontemporal !1 + ret <32 x i8> %1 +} + +define <4 x double> @test_v4f64_align16(<4 x double>* %src) nounwind { +; SSE2-LABEL: test_v4f64_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v4f64_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: retq +; +; AVX-LABEL: test_v4f64_align16: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbp +; AVX-NEXT: movq %rsp, %rbp +; AVX-NEXT: andq $-32, %rsp +; AVX-NEXT: subq $64, %rsp +; AVX-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rsp) +; AVX-NEXT: vmovaps (%rsp), %ymm0 +; AVX-NEXT: movq %rbp, %rsp +; AVX-NEXT: popq %rbp +; AVX-NEXT: retq + %1 = load <4 x double>, <4 x double>* %src, align 16, !nontemporal !1 + ret <4 x double> %1 +} + +define 
<8 x float> @test_v8f32_align16(<8 x float>* %src) nounwind { +; SSE2-LABEL: test_v8f32_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v8f32_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: retq +; +; AVX-LABEL: test_v8f32_align16: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbp +; AVX-NEXT: movq %rsp, %rbp +; AVX-NEXT: andq $-32, %rsp +; AVX-NEXT: subq $64, %rsp +; AVX-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rsp) +; AVX-NEXT: vmovaps (%rsp), %ymm0 +; AVX-NEXT: movq %rbp, %rsp +; AVX-NEXT: popq %rbp +; AVX-NEXT: retq + %1 = load <8 x float>, <8 x float>* %src, align 16, !nontemporal !1 + ret <8 x float> %1 +} + +define <4 x i64> @test_v4i64_align16(<4 x i64>* %src) nounwind { +; SSE2-LABEL: test_v4i64_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v4i64_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: retq +; +; AVX-LABEL: test_v4i64_align16: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbp +; AVX-NEXT: movq %rsp, %rbp +; AVX-NEXT: andq $-32, %rsp +; AVX-NEXT: subq $64, %rsp +; AVX-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rsp) +; AVX-NEXT: vmovaps (%rsp), %ymm0 +; AVX-NEXT: movq %rbp, %rsp +; AVX-NEXT: popq %rbp +; AVX-NEXT: retq + %1 = load <4 x i64>, <4 x i64>* %src, align 16, !nontemporal !1 + ret <4 x i64> %1 +} + +define <8 x i32> @test_v8i32_align16(<8 x i32>* %src) nounwind { +; SSE2-LABEL: test_v8i32_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v8i32_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: retq +; +; AVX-LABEL: test_v8i32_align16: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbp +; AVX-NEXT: movq %rsp, %rbp +; AVX-NEXT: andq $-32, %rsp +; AVX-NEXT: subq $64, %rsp +; AVX-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rsp) +; AVX-NEXT: vmovaps (%rsp), %ymm0 +; AVX-NEXT: movq %rbp, %rsp +; AVX-NEXT: popq %rbp +; AVX-NEXT: retq + %1 = load <8 x i32>, <8 x i32>* %src, align 16, !nontemporal !1 + ret <8 x i32> %1 +} + +define <16 x i16> @test_v16i16_align16(<16 x i16>* %src) nounwind { +; SSE2-LABEL: test_v16i16_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v16i16_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: retq +; +; AVX-LABEL: test_v16i16_align16: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbp +; AVX-NEXT: movq %rsp, %rbp +; AVX-NEXT: andq $-32, %rsp +; AVX-NEXT: subq $64, %rsp +; AVX-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rsp) +; AVX-NEXT: vmovaps (%rsp), %ymm0 +; AVX-NEXT: movq %rbp, %rsp +; AVX-NEXT: popq %rbp +; AVX-NEXT: retq + %1 = load <16 x i16>, <16 x i16>* %src, align 16, !nontemporal !1 + ret <16 x i16> %1 +} + +define <32 x i8> 
@test_v32i8_align16(<32 x i8>* %src) nounwind { +; SSE2-LABEL: test_v32i8_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v32i8_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: retq +; +; AVX-LABEL: test_v32i8_align16: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbp +; AVX-NEXT: movq %rsp, %rbp +; AVX-NEXT: andq $-32, %rsp +; AVX-NEXT: subq $64, %rsp +; AVX-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rsp) +; AVX-NEXT: vmovaps (%rsp), %ymm0 +; AVX-NEXT: movq %rbp, %rsp +; AVX-NEXT: popq %rbp +; AVX-NEXT: retq + %1 = load <32 x i8>, <32 x i8>* %src, align 16, !nontemporal !1 + ret <32 x i8> %1 +} + +; ZMM versions. + +define <8 x double> @test_v8f64_align1(<8 x double>* %src) nounwind { +; SSE-LABEL: test_v8f64_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: movups 32(%rdi), %xmm2 +; SSE-NEXT: movups 48(%rdi), %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: test_v8f64_align1: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovups (%rdi), %ymm0 +; AVX1-NEXT: vmovups 32(%rdi), %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v8f64_align1: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovups (%rdi), %ymm0 +; AVX2-NEXT: vmovups 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v8f64_align1: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovups (%rdi), %zmm0 +; AVX512-NEXT: retq + %1 = load <8 x double>, <8 x double>* %src, align 1, !nontemporal !1 + ret <8 x double> %1 +} + +define <16 x float> @test_v16f32_align1(<16 x float>* %src) nounwind { +; SSE-LABEL: test_v16f32_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: movups 32(%rdi), %xmm2 +; SSE-NEXT: movups 48(%rdi), %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: test_v16f32_align1: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovups (%rdi), %ymm0 +; AVX1-NEXT: vmovups 32(%rdi), %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v16f32_align1: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovups (%rdi), %ymm0 +; AVX2-NEXT: vmovups 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v16f32_align1: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovups (%rdi), %zmm0 +; AVX512-NEXT: retq + %1 = load <16 x float>, <16 x float>* %src, align 1, !nontemporal !1 + ret <16 x float> %1 +} + +define <8 x i64> @test_v8i64_align1(<8 x i64>* %src) nounwind { +; SSE-LABEL: test_v8i64_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: movups 32(%rdi), %xmm2 +; SSE-NEXT: movups 48(%rdi), %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: test_v8i64_align1: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovups (%rdi), %ymm0 +; AVX1-NEXT: vmovups 32(%rdi), %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v8i64_align1: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovups (%rdi), %ymm0 +; AVX2-NEXT: vmovups 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v8i64_align1: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovups (%rdi), %zmm0 +; AVX512-NEXT: retq + %1 = load <8 x i64>, <8 x i64>* %src, align 1, !nontemporal !1 + ret <8 x i64> %1 +} + +define <16 x i32> @test_v16i32_align1(<16 x i32>* %src) nounwind { +; SSE-LABEL: test_v16i32_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: movups 32(%rdi), %xmm2 +; SSE-NEXT: movups 48(%rdi), %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: 
test_v16i32_align1: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovups (%rdi), %ymm0 +; AVX1-NEXT: vmovups 32(%rdi), %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v16i32_align1: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovups (%rdi), %ymm0 +; AVX2-NEXT: vmovups 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v16i32_align1: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovups (%rdi), %zmm0 +; AVX512-NEXT: retq + %1 = load <16 x i32>, <16 x i32>* %src, align 1, !nontemporal !1 + ret <16 x i32> %1 +} + +define <32 x i16> @test_v32i16_align1(<32 x i16>* %src) nounwind { +; SSE-LABEL: test_v32i16_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: movups 32(%rdi), %xmm2 +; SSE-NEXT: movups 48(%rdi), %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: test_v32i16_align1: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovups (%rdi), %ymm0 +; AVX1-NEXT: vmovups 32(%rdi), %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v32i16_align1: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovups (%rdi), %ymm0 +; AVX2-NEXT: vmovups 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512DQ-LABEL: test_v32i16_align1: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: vmovups (%rdi), %ymm0 +; AVX512DQ-NEXT: vmovups 32(%rdi), %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: test_v32i16_align1: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vmovups (%rdi), %zmm0 +; AVX512BW-NEXT: retq + %1 = load <32 x i16>, <32 x i16>* %src, align 1, !nontemporal !1 + ret <32 x i16> %1 +} + +define <64 x i8> @test_v64i8_align1(<64 x i8>* %src) nounwind { +; SSE-LABEL: test_v64i8_align1: +; SSE: # %bb.0: +; SSE-NEXT: movups (%rdi), %xmm0 +; SSE-NEXT: movups 16(%rdi), %xmm1 +; SSE-NEXT: movups 32(%rdi), %xmm2 +; SSE-NEXT: movups 48(%rdi), %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: test_v64i8_align1: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovups (%rdi), %ymm0 +; AVX1-NEXT: vmovups 32(%rdi), %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v64i8_align1: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovups (%rdi), %ymm0 +; AVX2-NEXT: vmovups 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512DQ-LABEL: test_v64i8_align1: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: vmovups (%rdi), %ymm0 +; AVX512DQ-NEXT: vmovups 32(%rdi), %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: test_v64i8_align1: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vmovups (%rdi), %zmm0 +; AVX512BW-NEXT: retq + %1 = load <64 x i8>, <64 x i8>* %src, align 1, !nontemporal !1 + ret <64 x i8> %1 +} + +define <8 x double> @test_v8f64_align16(<8 x double>* %src) nounwind { +; SSE2-LABEL: test_v8f64_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v8f64_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v8f64_align16: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $96, %rsp +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, (%rsp) +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovaps (%rsp), %ymm0 +; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX1-NEXT: movq %rbp, %rsp +; 
AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v8f64_align16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rsp) +; AVX2-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v8f64_align16: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <8 x double>, <8 x double>* %src, align 16, !nontemporal !1 + ret <8 x double> %1 +} + +define <16 x float> @test_v16f32_align16(<16 x float>* %src) nounwind { +; SSE2-LABEL: test_v16f32_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v16f32_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v16f32_align16: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $96, %rsp +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, (%rsp) +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovaps (%rsp), %ymm0 +; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v16f32_align16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rsp) +; AVX2-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v16f32_align16: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, 
{{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <16 x float>, <16 x float>* %src, align 16, !nontemporal !1 + ret <16 x float> %1 +} + +define <8 x i64> @test_v8i64_align16(<8 x i64>* %src) nounwind { +; SSE2-LABEL: test_v8i64_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v8i64_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v8i64_align16: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $96, %rsp +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, (%rsp) +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovaps (%rsp), %ymm0 +; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v8i64_align16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rsp) +; AVX2-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v8i64_align16: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <8 x i64>, <8 x i64>* %src, align 16, !nontemporal !1 + ret <8 x i64> %1 +} + +define <16 x i32> @test_v16i32_align16(<16 x i32>* %src) nounwind { +; SSE2-LABEL: test_v16i32_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v16i32_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 
48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v16i32_align16: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $96, %rsp +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, (%rsp) +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovaps (%rsp), %ymm0 +; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v16i32_align16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rsp) +; AVX2-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v16i32_align16: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <16 x i32>, <16 x i32>* %src, align 16, !nontemporal !1 + ret <16 x i32> %1 +} + +define <32 x i16> @test_v32i16_align16(<32 x i16>* %src) nounwind { +; SSE2-LABEL: test_v32i16_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v32i16_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v32i16_align16: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $96, %rsp +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, (%rsp) +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovaps (%rsp), %ymm0 +; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v32i16_align16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: 
vmovntdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rsp) +; AVX2-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512DQ-LABEL: test_v32i16_align16: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: pushq %rbp +; AVX512DQ-NEXT: movq %rsp, %rbp +; AVX512DQ-NEXT: andq $-32, %rsp +; AVX512DQ-NEXT: subq $96, %rsp +; AVX512DQ-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512DQ-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512DQ-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512DQ-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512DQ-NEXT: vmovaps (%rsp), %ymm0 +; AVX512DQ-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX512DQ-NEXT: movq %rbp, %rsp +; AVX512DQ-NEXT: popq %rbp +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: test_v32i16_align16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: pushq %rbp +; AVX512BW-NEXT: movq %rsp, %rbp +; AVX512BW-NEXT: andq $-64, %rsp +; AVX512BW-NEXT: subq $128, %rsp +; AVX512BW-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512BW-NEXT: vmovaps (%rsp), %zmm0 +; AVX512BW-NEXT: movq %rbp, %rsp +; AVX512BW-NEXT: popq %rbp +; AVX512BW-NEXT: retq + %1 = load <32 x i16>, <32 x i16>* %src, align 16, !nontemporal !1 + ret <32 x i16> %1 +} + +define <64 x i8> @test_v64i8_align16(<64 x i8>* %src) nounwind { +; SSE2-LABEL: test_v64i8_align16: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v64i8_align16: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v64i8_align16: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: movq %rsp, %rbp +; AVX1-NEXT: andq $-32, %rsp +; AVX1-NEXT: subq $96, %rsp +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, (%rsp) +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovaps (%rsp), %ymm0 +; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX1-NEXT: movq %rbp, %rsp +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v64i8_align16: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rsp) +; AVX2-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX2-NEXT: vmovdqa 
%xmm0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512DQ-LABEL: test_v64i8_align16: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: pushq %rbp +; AVX512DQ-NEXT: movq %rsp, %rbp +; AVX512DQ-NEXT: andq $-32, %rsp +; AVX512DQ-NEXT: subq $96, %rsp +; AVX512DQ-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512DQ-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512DQ-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512DQ-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512DQ-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512DQ-NEXT: vmovaps (%rsp), %ymm0 +; AVX512DQ-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 +; AVX512DQ-NEXT: movq %rbp, %rsp +; AVX512DQ-NEXT: popq %rbp +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: test_v64i8_align16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: pushq %rbp +; AVX512BW-NEXT: movq %rsp, %rbp +; AVX512BW-NEXT: andq $-64, %rsp +; AVX512BW-NEXT: subq $128, %rsp +; AVX512BW-NEXT: vmovntdqa 48(%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa 32(%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa 16(%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX512BW-NEXT: vmovdqa %xmm0, (%rsp) +; AVX512BW-NEXT: vmovaps (%rsp), %zmm0 +; AVX512BW-NEXT: movq %rbp, %rsp +; AVX512BW-NEXT: popq %rbp +; AVX512BW-NEXT: retq + %1 = load <64 x i8>, <64 x i8>* %src, align 16, !nontemporal !1 + ret <64 x i8> %1 +} + +define <8 x double> @test_v8f64_align32(<8 x double>* %src) nounwind { +; SSE2-LABEL: test_v8f64_align32: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v8f64_align32: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v8f64_align32: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1 +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v8f64_align32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v8f64_align32: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 32(%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <8 x double>, <8 x double>* %src, align 32, !nontemporal !1 + ret <8 x double> %1 +} + +define <16 x float> @test_v16f32_align32(<16 x float>* %src) nounwind { +; SSE2-LABEL: test_v16f32_align32: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: 
retq +; +; SSE41-LABEL: test_v16f32_align32: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v16f32_align32: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1 +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v16f32_align32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v16f32_align32: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 32(%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <16 x float>, <16 x float>* %src, align 32, !nontemporal !1 + ret <16 x float> %1 +} + +define <8 x i64> @test_v8i64_align32(<8 x i64>* %src) nounwind { +; SSE2-LABEL: test_v8i64_align32: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v8i64_align32: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v8i64_align32: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1 +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v8i64_align32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v8i64_align32: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 32(%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <8 x i64>, <8 x i64>* %src, align 32, !nontemporal !1 + ret <8 x i64> %1 +} + +define <16 x i32> @test_v16i32_align32(<16 x i32>* %src) nounwind { +; SSE2-LABEL: test_v16i32_align32: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v16i32_align32: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v16i32_align32: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, 
%xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1 +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v16i32_align32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_v16i32_align32: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $128, %rsp +; AVX512-NEXT: vmovntdqa 32(%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512-NEXT: vmovdqa %ymm0, (%rsp) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %1 = load <16 x i32>, <16 x i32>* %src, align 32, !nontemporal !1 + ret <16 x i32> %1 +} + +define <32 x i16> @test_v32i16_align32(<32 x i16>* %src) nounwind { +; SSE2-LABEL: test_v32i16_align32: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v32i16_align32: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v32i16_align32: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1 +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v32i16_align32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512DQ-LABEL: test_v32i16_align32: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512DQ-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: test_v32i16_align32: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: pushq %rbp +; AVX512BW-NEXT: movq %rsp, %rbp +; AVX512BW-NEXT: andq $-64, %rsp +; AVX512BW-NEXT: subq $128, %rsp +; AVX512BW-NEXT: vmovntdqa 32(%rdi), %ymm0 +; AVX512BW-NEXT: vmovdqa %ymm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512BW-NEXT: vmovdqa %ymm0, (%rsp) +; AVX512BW-NEXT: vmovaps (%rsp), %zmm0 +; AVX512BW-NEXT: movq %rbp, %rsp +; AVX512BW-NEXT: popq %rbp +; AVX512BW-NEXT: retq + %1 = load <32 x i16>, <32 x i16>* %src, align 32, !nontemporal !1 + ret <32 x i16> %1 +} + +define <64 x i8> @test_v64i8_align32(<64 x i8>* %src) nounwind { +; SSE2-LABEL: test_v64i8_align32: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps (%rdi), %xmm0 +; SSE2-NEXT: movaps 16(%rdi), %xmm1 +; SSE2-NEXT: movaps 32(%rdi), %xmm2 +; SSE2-NEXT: movaps 48(%rdi), %xmm3 +; SSE2-NEXT: retq +; +; SSE41-LABEL: test_v64i8_align32: +; SSE41: # %bb.0: +; SSE41-NEXT: movntdqa (%rdi), %xmm0 +; SSE41-NEXT: movntdqa 16(%rdi), %xmm1 +; SSE41-NEXT: movntdqa 32(%rdi), %xmm2 +; SSE41-NEXT: movntdqa 48(%rdi), %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: test_v64i8_align32: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovntdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1 +; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_v64i8_align32: 
+; AVX2: # %bb.0: +; AVX2-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX2-NEXT: retq +; +; AVX512DQ-LABEL: test_v64i8_align32: +; AVX512DQ: # %bb.0: +; AVX512DQ-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512DQ-NEXT: vmovntdqa 32(%rdi), %ymm1 +; AVX512DQ-NEXT: retq +; +; AVX512BW-LABEL: test_v64i8_align32: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: pushq %rbp +; AVX512BW-NEXT: movq %rsp, %rbp +; AVX512BW-NEXT: andq $-64, %rsp +; AVX512BW-NEXT: subq $128, %rsp +; AVX512BW-NEXT: vmovntdqa 32(%rdi), %ymm0 +; AVX512BW-NEXT: vmovdqa %ymm0, {{[0-9]+}}(%rsp) +; AVX512BW-NEXT: vmovntdqa (%rdi), %ymm0 +; AVX512BW-NEXT: vmovdqa %ymm0, (%rsp) +; AVX512BW-NEXT: vmovaps (%rsp), %zmm0 +; AVX512BW-NEXT: movq %rbp, %rsp +; AVX512BW-NEXT: popq %rbp +; AVX512BW-NEXT: retq + %1 = load <64 x i8>, <64 x i8>* %src, align 32, !nontemporal !1 + ret <64 x i8> %1 +} + +!1 = !{i32 1}