From f5a5785632d0e8fe317ba56e563241421b870341 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Fri, 21 Jun 2019 20:16:26 +0000
Subject: [PATCH] [X86] Add test cases for incorrect shrinking of volatile
 vector loads from 128-bits to 32 or 64 bits. NFC

This is caused by isel patterns that look for vzmovl+load and treat it
the same as vzload.

llvm-svn: 364101
---
 llvm/test/CodeGen/X86/vector-zmov.ll | 34 ++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/llvm/test/CodeGen/X86/vector-zmov.ll b/llvm/test/CodeGen/X86/vector-zmov.ll
index 6f2f78263b28..5dfdf4fe442b 100644
--- a/llvm/test/CodeGen/X86/vector-zmov.ll
+++ b/llvm/test/CodeGen/X86/vector-zmov.ll
@@ -36,3 +36,37 @@ entry:
   %Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
   ret <2 x i64>%Y
 }
+
+; FIXME: We shouldn't shrink the load to movss here since it is volatile.
+define <4 x i32> @load_zmov_4i32_to_0zzz_volatile(<4 x i32> *%ptr) {
+; SSE-LABEL: load_zmov_4i32_to_0zzz_volatile:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: load_zmov_4i32_to_0zzz_volatile:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    retq
+entry:
+  %X = load volatile <4 x i32>, <4 x i32>* %ptr
+  %Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+  ret <4 x i32>%Y
+}
+
+; FIXME: We shouldn't shrink the load to movsd here since it is volatile.
+define <2 x i64> @load_zmov_2i64_to_0z_volatile(<2 x i64> *%ptr) {
+; SSE-LABEL: load_zmov_2i64_to_0z_volatile:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: load_zmov_2i64_to_0z_volatile:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    retq
+entry:
+  %X = load volatile <2 x i64>, <2 x i64>* %ptr
+  %Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  ret <2 x i64>%Y
+}
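
Note on the FIXME lines above: the wrong output comes from isel patterns that
match a vzmovl (keep element 0, zero the rest) of a full 128-bit load and
select the same narrow scalar-load instruction used for vzload, silently
shrinking the memory access from 128 bits to 32 or 64 bits. For a volatile
load the access width is observable, so a volatility-correct lowering would
keep the 16-byte load and do the zeroing in registers. The following is a
sketch of what such SSE output could look like, in the style produced by
update_llc_test_checks.py; it is hypothetical, not the committed CHECK lines,
and the exact registers and instructions may differ:

; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movaps (%rdi), %xmm1    # full 128-bit volatile load preserved
; SSE-NEXT:    xorps %xmm0, %xmm0      # zeroed destination register
; SSE-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT:    retq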