; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VL
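
; The loads below are tagged with !nontemporal metadata. MOVNTDQA (SSE4.1) is
; the only non-temporal *load* instruction on x86, so the SSE2 run is expected
; to fall back to ordinary aligned loads (movaps), while the SSE4.1/AVX/AVX512
; runs should select (v)movntdqa at the widest width each target supports.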
define <4 x float> @test_v4f32(<4 x float>* %src) {
; SSE2-LABEL: test_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
  ret <4 x float> %1
}

define <4 x i32> @test_v4i32(<4 x i32>* %src) {
; SSE2-LABEL: test_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
  ret <4 x i32> %1
}

define <2 x double> @test_v2f64(<2 x double>* %src) {
; SSE2-LABEL: test_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
  ret <2 x double> %1
}

define <2 x i64> @test_v2i64(<2 x i64>* %src) {
; SSE2-LABEL: test_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
  ret <2 x i64> %1
}

define <8 x i16> @test_v8i16(<8 x i16>* %src) {
; SSE2-LABEL: test_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
  ret <8 x i16> %1
}
define <16 x i8> @test_v16i8(<16 x i8>* %src) {
; SSE2-LABEL: test_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
  ret <16 x i8> %1
}

; And now YMM versions.
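; Plain AVX has no 256-bit vmovntdqa (that needs AVX2), so the AVX1 checks
; below expect two 128-bit non-temporal loads recombined with vinsertf128,
; while AVX2 and AVX512 use a single 256-bit vmovntdqa.
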
define <8 x float> @test_v8f32(<8 x float>* %src) {
; SSE2-LABEL: test_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
  ret <8 x float> %1
}

define <8 x i32> @test_v8i32(<8 x i32>* %src) {
; SSE2-LABEL: test_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
  ret <8 x i32> %1
}

define <4 x double> @test_v4f64(<4 x double>* %src) {
; SSE2-LABEL: test_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
  ret <4 x double> %1
}

define <4 x i64> @test_v4i64(<4 x i64>* %src) {
; SSE2-LABEL: test_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
  ret <4 x i64> %1
}

define <16 x i16> @test_v16i16(<16 x i16>* %src) {
; SSE2-LABEL: test_v16i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
  ret <16 x i16> %1
}
define <32 x i8> @test_v32i8(<32 x i8>* %src) {
; SSE2-LABEL: test_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
  ret <32 x i8> %1
}

; And now ZMM versions.
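; 512-bit non-temporal loads (vmovntdqa %zmm) need AVX512F. The v32i16/v64i8
; cases are only legal as a single 512-bit value with AVX512BW, so the
; AVX512F/AVX512VL runs are expected to split them into two 256-bit loads.
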
define <16 x float> @test_v16f32(<16 x float>* %src) {
; SSE2-LABEL: test_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x float>, <16 x float>* %src, align 64, !nontemporal !1
  ret <16 x float> %1
}

define <16 x i32> @test_v16i32(<16 x i32>* %src) {
; SSE2-LABEL: test_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x i32>, <16 x i32>* %src, align 64, !nontemporal !1
  ret <16 x i32> %1
}

define <8 x double> @test_v8f64(<8 x double>* %src) {
; SSE2-LABEL: test_v8f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x double>, <8 x double>* %src, align 64, !nontemporal !1
  ret <8 x double> %1
}

define <8 x i64> @test_v8i64(<8 x i64>* %src) {
; SSE2-LABEL: test_v8i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x i64>, <8 x i64>* %src, align 64, !nontemporal !1
  ret <8 x i64> %1
}

define <32 x i16> @test_v32i16(<32 x i16>* %src) {
; SSE2-LABEL: test_v32i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
  %1 = load <32 x i16>, <32 x i16>* %src, align 64, !nontemporal !1
  ret <32 x i16> %1
}
define <64 x i8> @test_v64i8(<64 x i8>* %src) {
; SSE2-LABEL: test_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
  %1 = load <64 x i8>, <64 x i8>* %src, align 64, !nontemporal !1
  ret <64 x i8> %1
}

; Check cases where the load would be folded.
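; MOVNTDQA is load-only and cannot be used as a folded memory operand, so on
; SSE4.1 and later the non-temporal load should stay a separate instruction in
; front of the add. SSE2 has no non-temporal load at all, so folding the plain
; load into the op (e.g. addps (%rdi), %xmm0) is the expected lowering there.
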
define <4 x float> @test_arg_v4f32(<4 x float> %arg, <4 x float>* %src) {
; SSE2-LABEL: test_arg_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
  %2 = fadd <4 x float> %arg, %1
  ret <4 x float> %2
}

define <4 x i32> @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %src) {
; SSE2-LABEL: test_arg_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
  %2 = add <4 x i32> %arg, %1
  ret <4 x i32> %2
}

define <2 x double> @test_arg_v2f64(<2 x double> %arg, <2 x double>* %src) {
; SSE2-LABEL: test_arg_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: addpd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
  %2 = fadd <2 x double> %arg, %1
  ret <2 x double> %2
}

define <2 x i64> @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %src) {
; SSE2-LABEL: test_arg_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
  %2 = add <2 x i64> %arg, %1
  ret <2 x i64> %2
}

define <8 x i16> @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %src) {
; SSE2-LABEL: test_arg_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
  %2 = add <8 x i16> %arg, %1
  ret <8 x i16> %2
}
define <16 x i8> @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %src) {
; SSE2-LABEL: test_arg_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
  %2 = add <16 x i8> %arg, %1
  ret <16 x i8> %2
}

; And now YMM versions.
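; In the 256-bit integer cases, AVX1 has no 256-bit integer add, so the checks
; expect the sum to be formed from two 128-bit halves (vextractf128 + vpadd* +
; vinsertf128); the floating-point cases can use a full-width ymm add.
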
define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
; SSE2-LABEL: test_arg_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: addps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: addps %xmm3, %xmm0
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
  %2 = fadd <8 x float> %arg, %1
  ret <8 x float> %2
}

define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
; SSE2-LABEL: test_arg_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: paddd 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddd %xmm3, %xmm0
; SSE41-NEXT: paddd %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
  %2 = add <8 x i32> %arg, %1
  ret <8 x i32> %2
}

define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
; SSE2-LABEL: test_arg_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: addpd 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: addpd %xmm3, %xmm0
; SSE41-NEXT: addpd %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
  %2 = fadd <4 x double> %arg, %1
  ret <4 x double> %2
}

define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
; SSE2-LABEL: test_arg_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: paddq 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
  %2 = add <4 x i64> %arg, %1
  ret <4 x i64> %2
}

define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
; SSE2-LABEL: test_arg_v16i16:
; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: paddw 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddw %xmm3, %xmm0
; SSE41-NEXT: paddw %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
  %2 = add <16 x i16> %arg, %1
  ret <16 x i16> %2
}
define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
; SSE2-LABEL: test_arg_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: paddb 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm0
; SSE41-NEXT: paddb %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
  %2 = add <32 x i8> %arg, %1
  ret <32 x i8> %2
}

; And now ZMM versions.
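; With AVX512 the whole 64-byte value is expected to come in as a single
; vmovntdqa %zmm feeding one zmm add; earlier targets work on two ymm or four
; xmm halves instead.
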
define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
|
2017-06-06 00:45:32 +08:00
|
|
|
; SSE2-LABEL: test_arg_v16f32:
|
2017-12-05 01:18:51 +08:00
|
|
|
; SSE2: # %bb.0:
|
2017-06-06 00:45:32 +08:00
|
|
|
; SSE2-NEXT: addps (%rdi), %xmm0
|
|
|
|
; SSE2-NEXT: addps 16(%rdi), %xmm1
|
|
|
|
; SSE2-NEXT: addps 32(%rdi), %xmm2
|
|
|
|
; SSE2-NEXT: addps 48(%rdi), %xmm3
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_arg_v16f32:
|
2017-12-05 01:18:51 +08:00
|
|
|
; SSE41: # %bb.0:
|
2017-06-06 00:45:32 +08:00
|
|
|
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
|
|
|
|
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
|
|
|
|
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
|
|
|
|
; SSE41-NEXT: movntdqa (%rdi), %xmm7
|
|
|
|
; SSE41-NEXT: addps %xmm7, %xmm0
|
|
|
|
; SSE41-NEXT: addps %xmm6, %xmm1
|
|
|
|
; SSE41-NEXT: addps %xmm5, %xmm2
|
|
|
|
; SSE41-NEXT: addps %xmm4, %xmm3
|
|
|
|
; SSE41-NEXT: retq
|
2016-06-03 21:42:49 +08:00
|
|
|
;
|
2017-06-05 23:43:03 +08:00
|
|
|
; AVX1-LABEL: test_arg_v16f32:
|
2017-12-05 01:18:51 +08:00
|
|
|
; AVX1: # %bb.0:
|
2017-06-06 00:02:01 +08:00
|
|
|
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
|
|
|
|
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
|
|
|
|
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
|
|
|
|
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
|
|
|
|
; AVX1-NEXT: vaddps %ymm3, %ymm0, %ymm0
|
|
|
|
; AVX1-NEXT: vaddps %ymm2, %ymm1, %ymm1
|
2017-06-05 23:43:03 +08:00
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_arg_v16f32:
|
2017-12-05 01:18:51 +08:00
|
|
|
; AVX2: # %bb.0:
|
2017-06-05 23:43:03 +08:00
|
|
|
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
|
|
|
|
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
|
|
|
|
; AVX2-NEXT: vaddps %ymm3, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
|
|
|
|
; AVX2-NEXT: retq
|
2016-06-03 21:42:49 +08:00
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_arg_v16f32:
|
2017-12-05 01:18:51 +08:00
|
|
|
; AVX512: # %bb.0:
|
2017-06-05 23:43:03 +08:00
|
|
|
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
|
|
|
|
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
|
2016-06-03 21:42:49 +08:00
|
|
|
; AVX512-NEXT: retq
|
2016-06-03 22:12:43 +08:00
|
|
|
%1 = load <16 x float>, <16 x float>* %src, align 64, !nontemporal !1
|
2016-06-03 21:42:49 +08:00
|
|
|
%2 = fadd <16 x float> %arg, %1
|
|
|
|
ret <16 x float> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
; SSE2-LABEL: test_arg_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: paddd 16(%rdi), %xmm1
; SSE2-NEXT: paddd 32(%rdi), %xmm2
; SSE2-NEXT: paddd 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddd %xmm7, %xmm0
; SSE41-NEXT: paddd %xmm6, %xmm1
; SSE41-NEXT: paddd %xmm5, %xmm2
; SSE41-NEXT: paddd %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x i32>, <16 x i32>* %src, align 64, !nontemporal !1
  %2 = add <16 x i32> %arg, %1
  ret <16 x i32> %2
}

define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
; SSE2-LABEL: test_arg_v8f64:
; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: addpd 16(%rdi), %xmm1
; SSE2-NEXT: addpd 32(%rdi), %xmm2
; SSE2-NEXT: addpd 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: addpd %xmm7, %xmm0
; SSE41-NEXT: addpd %xmm6, %xmm1
; SSE41-NEXT: addpd %xmm5, %xmm2
; SSE41-NEXT: addpd %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vaddpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vaddpd %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x double>, <8 x double>* %src, align 64, !nontemporal !1
  %2 = fadd <8 x double> %arg, %1
  ret <8 x double> %2
}

define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
; SSE2-LABEL: test_arg_v8i64:
; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: paddq 16(%rdi), %xmm1
; SSE2-NEXT: paddq 32(%rdi), %xmm2
; SSE2-NEXT: paddq 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddq %xmm7, %xmm0
; SSE41-NEXT: paddq %xmm6, %xmm1
; SSE41-NEXT: paddq %xmm5, %xmm2
; SSE41-NEXT: paddq %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x i64>, <8 x i64>* %src, align 64, !nontemporal !1
  %2 = add <8 x i64> %arg, %1
  ret <8 x i64> %2
}

define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; SSE2-LABEL: test_arg_v32i16:
; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: paddw 16(%rdi), %xmm1
; SSE2-NEXT: paddw 32(%rdi), %xmm2
; SSE2-NEXT: paddw 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v32i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddw %xmm7, %xmm0
; SSE41-NEXT: paddw %xmm6, %xmm1
; SSE41-NEXT: paddw %xmm5, %xmm2
; SSE41-NEXT: paddw %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_arg_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512F-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_arg_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_arg_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512VL-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
  %1 = load <32 x i16>, <32 x i16>* %src, align 64, !nontemporal !1
  %2 = add <32 x i16> %arg, %1
  ret <32 x i16> %2
}

define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; SSE2-LABEL: test_arg_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: paddb 16(%rdi), %xmm1
; SSE2-NEXT: paddb 32(%rdi), %xmm2
; SSE2-NEXT: paddb 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddb %xmm7, %xmm0
; SSE41-NEXT: paddb %xmm6, %xmm1
; SSE41-NEXT: paddb %xmm5, %xmm2
; SSE41-NEXT: paddb %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_arg_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_arg_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_arg_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm3
; AVX512VL-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
  %1 = load <64 x i8>, <64 x i8>* %src, align 64, !nontemporal !1
  %2 = add <64 x i8> %arg, %1
  ret <64 x i8> %2
}

; Unaligned non-temporal loads (not supported)
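; Note: (v)movntdqa requires a memory operand aligned to the full vector width,
; so for these under-aligned loads the !nontemporal hint is expected to be
; dropped and plain unaligned moves (movups/vmovups) emitted instead.
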
define <4 x float> @test_unaligned_v4f32(<4 x float>* %src) {
; SSE-LABEL: test_unaligned_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 1, !nontemporal !1
  ret <4 x float> %1
}

define <4 x i32> @test_unaligned_v4i32(<4 x i32>* %src) {
; SSE-LABEL: test_unaligned_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 1, !nontemporal !1
  ret <4 x i32> %1
}

define <2 x double> @test_unaligned_v2f64(<2 x double>* %src) {
; SSE-LABEL: test_unaligned_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 1, !nontemporal !1
  ret <2 x double> %1
}

define <2 x i64> @test_unaligned_v2i64(<2 x i64>* %src) {
; SSE-LABEL: test_unaligned_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 1, !nontemporal !1
  ret <2 x i64> %1
}

define <8 x i16> @test_unaligned_v8i16(<8 x i16>* %src) {
; SSE-LABEL: test_unaligned_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 1, !nontemporal !1
  ret <8 x i16> %1
}

define <16 x i8> @test_unaligned_v16i8(<16 x i8>* %src) {
; SSE-LABEL: test_unaligned_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 1, !nontemporal !1
  ret <16 x i8> %1
}

; And now YMM versions.
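; For these 256-bit cases, pre-AVX targets are expected to split the load into
; two unaligned 128-bit movups, while AVX targets can use a single ymm vmovups.
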
define <8 x float> @test_unaligned_v8f32(<8 x float>* %src) {
; SSE-LABEL: test_unaligned_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 1, !nontemporal !1
  ret <8 x float> %1
}

define <8 x i32> @test_unaligned_v8i32(<8 x i32>* %src) {
; SSE-LABEL: test_unaligned_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 1, !nontemporal !1
  ret <8 x i32> %1
}

define <4 x double> @test_unaligned_v4f64(<4 x double>* %src) {
; SSE-LABEL: test_unaligned_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 1, !nontemporal !1
  ret <4 x double> %1
}

define <4 x i64> @test_unaligned_v4i64(<4 x i64>* %src) {
; SSE-LABEL: test_unaligned_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 1, !nontemporal !1
  ret <4 x i64> %1
}

define <16 x i16> @test_unaligned_v16i16(<16 x i16>* %src) {
; SSE-LABEL: test_unaligned_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 1, !nontemporal !1
  ret <16 x i16> %1
}

define <32 x i8> @test_unaligned_v32i8(<32 x i8>* %src) {
; SSE-LABEL: test_unaligned_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v32i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 1, !nontemporal !1
  ret <32 x i8> %1
}

; And now ZMM versions.
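; For these 512-bit cases, SSE and AVX targets are expected to split the load
; into 128-bit or 256-bit unaligned chunks; only AVX512 can use one zmm vmovups.
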
define <16 x float> @test_unaligned_v16f32(<16 x float>* %src) {
; SSE-LABEL: test_unaligned_v16f32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x float>, <16 x float>* %src, align 1, !nontemporal !1
  ret <16 x float> %1
}

define <16 x i32> @test_unaligned_v16i32(<16 x i32>* %src) {
; SSE-LABEL: test_unaligned_v16i32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x i32>, <16 x i32>* %src, align 1, !nontemporal !1
  ret <16 x i32> %1
}

define <8 x double> @test_unaligned_v8f64(<8 x double>* %src) {
; SSE-LABEL: test_unaligned_v8f64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x double>, <8 x double>* %src, align 1, !nontemporal !1
  ret <8 x double> %1
}

define <8 x i64> @test_unaligned_v8i64(<8 x i64>* %src) {
; SSE-LABEL: test_unaligned_v8i64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x i64>, <8 x i64>* %src, align 1, !nontemporal !1
  ret <8 x i64> %1
}

define <32 x i16> @test_unaligned_v32i16(<32 x i16>* %src) {
; SSE-LABEL: test_unaligned_v32i16:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v32i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512F-LABEL: test_unaligned_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovups (%rdi), %ymm0
; AVX512F-NEXT: vmovups 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_unaligned_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovups (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_unaligned_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovups (%rdi), %ymm0
; AVX512VL-NEXT: vmovups 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
  %1 = load <32 x i16>, <32 x i16>* %src, align 1, !nontemporal !1
  ret <32 x i16> %1
}

define <64 x i8> @test_unaligned_v64i8(<64 x i8>* %src) {
; SSE-LABEL: test_unaligned_v64i8:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v64i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512F-LABEL: test_unaligned_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovups (%rdi), %ymm0
; AVX512F-NEXT: vmovups 32(%rdi), %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_unaligned_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovups (%rdi), %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_unaligned_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovups (%rdi), %ymm0
; AVX512VL-NEXT: vmovups 32(%rdi), %ymm1
; AVX512VL-NEXT: retq
  %1 = load <64 x i8>, <64 x i8>* %src, align 1, !nontemporal !1
  ret <64 x i8> %1
}

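; A non-temporal load feeding a masked select: with AVX512 this should fold into
; vmovntdqa plus a masked register move (vmovdqa32 {%k1}); older subtargets are
; expected to emit the non-temporal loads and an explicit compare/blend sequence.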
define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; SSE2-LABEL: test_masked_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm8, %xmm8
; SSE2-NEXT: pcmpeqd %xmm8, %xmm7
; SSE2-NEXT: pcmpeqd %xmm8, %xmm6
; SSE2-NEXT: pcmpeqd %xmm8, %xmm5
; SSE2-NEXT: pcmpeqd %xmm8, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pandn (%rdi), %xmm4
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pandn 16(%rdi), %xmm5
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pandn 32(%rdi), %xmm6
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pandn 48(%rdi), %xmm7
; SSE2-NEXT: por %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_masked_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm7, %xmm9
; SSE41-NEXT: movdqa %xmm6, %xmm10
; SSE41-NEXT: movdqa %xmm5, %xmm11
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm7
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm5
; SSE41-NEXT: blendvps %xmm0, %xmm8, %xmm5
; SSE41-NEXT: movdqa %xmm11, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm6
; SSE41-NEXT: movdqa %xmm10, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm7
; SSE41-NEXT: movdqa %xmm9, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm3, %xmm4
; SSE41-NEXT: movaps %xmm5, %xmm0
; SSE41-NEXT: movaps %xmm6, %xmm1
; SSE41-NEXT: movaps %xmm7, %xmm2
; SSE41-NEXT: movaps %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_masked_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm4
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4
; AVX1-NEXT: vblendvps %ymm3, %ymm4, %ymm1, %ymm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vblendvps %ymm2, %ymm3, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_masked_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm4
; AVX2-NEXT: vblendvps %ymm3, %ymm1, %ymm4, %ymm1
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_masked_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k1
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512-NEXT: retq
  %mask = icmp ne <16 x i32> %mask1, zeroinitializer
  %vaddr = bitcast i8* %addr to <16 x i32>*
  %r = load <16 x i32>, <16 x i32>* %vaddr, align 64, !nontemporal !1
  %res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old
  ret <16 x i32>%res
}

; Reduced from https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=10895
define i32 @PR39256(float* %ptr) {
; SSE-LABEL: PR39256:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: ucomiss {{.*}}(%rip), %xmm0
; SSE-NEXT: setb (%rax)
; SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; SSE-NEXT: retq
;
; AVX-LABEL: PR39256:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vucomiss {{.*}}(%rip), %xmm0
; AVX-NEXT: setb (%rax)
; AVX-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; AVX-NEXT: retq
;
; AVX512-LABEL: PR39256:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: vucomiss {{.*}}(%rip), %xmm0
; AVX512-NEXT: setb (%rax)
; AVX512-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; AVX512-NEXT: retq
entry:
  %l = load float, float* %ptr, !nontemporal !1
  %C = fcmp ult float %l, 0x36A0000000000000
  store i1 %C, i1* undef
  ret i32 -2147483648
}

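; The !nontemporal attachments above all reference this node; per the LLVM
; LangRef the metadata must be a single i32 constant 1.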
!1 = !{i32 1}