From 41d0666391131ddee451085c72ba6513872e7f6c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 8 Dec 2020 12:19:43 +0000
Subject: [PATCH] [SLP][X86] Extend PR46983 tests to include SSE2,SSE42,AVX512BW test coverage

Noticed while reviewing D92824
---
 .../Transforms/SLPVectorizer/X86/pr46983.ll | 122 ++++++++++++------
 1 file changed, 82 insertions(+), 40 deletions(-)

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll
index 7df32e665805..ca74d9d71631 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX
-; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX2
 
 define void @store_i32(i32* nocapture %0, i32 %1, i32 %2) {
 ; CHECK-LABEL: @store_i32(
@@ -124,44 +127,83 @@ define void @store_i8(i8* nocapture %0, i32 %1, i32 %2) {
 }
 
 define void @store_i64(i64* nocapture %0, i32 %1, i32 %2) {
-; AVX-LABEL: @store_i64(
-; AVX-NEXT: [[TMP4:%.*]] = zext i32 [[TMP1:%.*]] to i64
-; AVX-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP0:%.*]], align 8, [[TBAA5:!tbaa !.*]]
-; AVX-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], [[TMP4]]
-; AVX-NEXT: [[TMP7:%.*]] = lshr i64 [[TMP6]], 15
-; AVX-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
-; AVX-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 255
-; AVX-NEXT: [[TMP10:%.*]] = and i64 [[TMP7]], 4294967295
-; AVX-NEXT: [[TMP11:%.*]] = select i1 [[TMP9]], i64 [[TMP10]], i64 255
-; AVX-NEXT: store i64 [[TMP11]], i64* [[TMP0]], align 8, [[TBAA5]]
-; AVX-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 1
-; AVX-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8, [[TBAA5]]
-; AVX-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], [[TMP4]]
-; AVX-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 15
-; AVX-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
-; AVX-NEXT: [[TMP17:%.*]] = icmp ult i32 [[TMP16]], 255
-; AVX-NEXT: [[TMP18:%.*]] = and i64 [[TMP15]], 4294967295
-; AVX-NEXT: [[TMP19:%.*]] = select i1 [[TMP17]], i64 [[TMP18]], i64 255
-; AVX-NEXT: store i64 [[TMP19]], i64* [[TMP12]], align 8, [[TBAA5]]
-; AVX-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 2
-; AVX-NEXT: [[TMP21:%.*]] = load i64, i64* [[TMP20]], align 8, [[TBAA5]]
-; AVX-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], [[TMP4]]
-; AVX-NEXT: [[TMP23:%.*]] = lshr i64 [[TMP22]], 15
-; AVX-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
-; AVX-NEXT: [[TMP25:%.*]] = icmp ult i32 [[TMP24]], 255
-; AVX-NEXT: [[TMP26:%.*]] = and i64 [[TMP23]], 4294967295
-; AVX-NEXT: [[TMP27:%.*]] = select i1 [[TMP25]], i64 [[TMP26]], i64 255
-; AVX-NEXT: store i64 [[TMP27]], i64* [[TMP20]], align 8, [[TBAA5]]
-; AVX-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 3
-; AVX-NEXT: [[TMP29:%.*]] = load i64, i64* [[TMP28]], align 8, [[TBAA5]]
-; AVX-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], [[TMP4]]
-; AVX-NEXT: [[TMP31:%.*]] = lshr i64 [[TMP30]], 15
-; AVX-NEXT: [[TMP32:%.*]] = trunc i64 [[TMP31]] to i32
-; AVX-NEXT: [[TMP33:%.*]] = icmp ult i32 [[TMP32]], 255
-; AVX-NEXT: [[TMP34:%.*]] = and i64 [[TMP31]], 4294967295
-; AVX-NEXT: [[TMP35:%.*]] = select i1 [[TMP33]], i64 [[TMP34]], i64 255
-; AVX-NEXT: store i64 [[TMP35]], i64* [[TMP28]], align 8, [[TBAA5]]
-; AVX-NEXT: ret void
+; SSE-LABEL: @store_i64(
+; SSE-NEXT: [[TMP4:%.*]] = zext i32 [[TMP1:%.*]] to i64
+; SSE-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP0:%.*]], align 8, [[TBAA5:!tbaa !.*]]
+; SSE-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], [[TMP4]]
+; SSE-NEXT: [[TMP7:%.*]] = lshr i64 [[TMP6]], 15
+; SSE-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; SSE-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 255
+; SSE-NEXT: [[TMP10:%.*]] = and i64 [[TMP7]], 4294967295
+; SSE-NEXT: [[TMP11:%.*]] = select i1 [[TMP9]], i64 [[TMP10]], i64 255
+; SSE-NEXT: store i64 [[TMP11]], i64* [[TMP0]], align 8, [[TBAA5]]
+; SSE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 1
+; SSE-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8, [[TBAA5]]
+; SSE-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], [[TMP4]]
+; SSE-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 15
+; SSE-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
+; SSE-NEXT: [[TMP17:%.*]] = icmp ult i32 [[TMP16]], 255
+; SSE-NEXT: [[TMP18:%.*]] = and i64 [[TMP15]], 4294967295
+; SSE-NEXT: [[TMP19:%.*]] = select i1 [[TMP17]], i64 [[TMP18]], i64 255
+; SSE-NEXT: store i64 [[TMP19]], i64* [[TMP12]], align 8, [[TBAA5]]
+; SSE-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 2
+; SSE-NEXT: [[TMP21:%.*]] = load i64, i64* [[TMP20]], align 8, [[TBAA5]]
+; SSE-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], [[TMP4]]
+; SSE-NEXT: [[TMP23:%.*]] = lshr i64 [[TMP22]], 15
+; SSE-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
+; SSE-NEXT: [[TMP25:%.*]] = icmp ult i32 [[TMP24]], 255
+; SSE-NEXT: [[TMP26:%.*]] = and i64 [[TMP23]], 4294967295
+; SSE-NEXT: [[TMP27:%.*]] = select i1 [[TMP25]], i64 [[TMP26]], i64 255
+; SSE-NEXT: store i64 [[TMP27]], i64* [[TMP20]], align 8, [[TBAA5]]
+; SSE-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 3
+; SSE-NEXT: [[TMP29:%.*]] = load i64, i64* [[TMP28]], align 8, [[TBAA5]]
+; SSE-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], [[TMP4]]
+; SSE-NEXT: [[TMP31:%.*]] = lshr i64 [[TMP30]], 15
+; SSE-NEXT: [[TMP32:%.*]] = trunc i64 [[TMP31]] to i32
+; SSE-NEXT: [[TMP33:%.*]] = icmp ult i32 [[TMP32]], 255
+; SSE-NEXT: [[TMP34:%.*]] = and i64 [[TMP31]], 4294967295
+; SSE-NEXT: [[TMP35:%.*]] = select i1 [[TMP33]], i64 [[TMP34]], i64 255
+; SSE-NEXT: store i64 [[TMP35]], i64* [[TMP28]], align 8, [[TBAA5]]
+; SSE-NEXT: ret void
+;
+; AVX1-LABEL: @store_i64(
+; AVX1-NEXT: [[TMP4:%.*]] = zext i32 [[TMP1:%.*]] to i64
+; AVX1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP0:%.*]], align 8, [[TBAA5:!tbaa !.*]]
+; AVX1-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], [[TMP4]]
+; AVX1-NEXT: [[TMP7:%.*]] = lshr i64 [[TMP6]], 15
+; AVX1-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; AVX1-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 255
+; AVX1-NEXT: [[TMP10:%.*]] = and i64 [[TMP7]], 4294967295
+; AVX1-NEXT: [[TMP11:%.*]] = select i1 [[TMP9]], i64 [[TMP10]], i64 255
+; AVX1-NEXT: store i64 [[TMP11]], i64* [[TMP0]], align 8, [[TBAA5]]
+; AVX1-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 1
+; AVX1-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8, [[TBAA5]]
+; AVX1-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], [[TMP4]]
+; AVX1-NEXT: [[TMP15:%.*]] = lshr i64 [[TMP14]], 15
+; AVX1-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
+; AVX1-NEXT: [[TMP17:%.*]] = icmp ult i32 [[TMP16]], 255
+; AVX1-NEXT: [[TMP18:%.*]] = and i64 [[TMP15]], 4294967295
+; AVX1-NEXT: [[TMP19:%.*]] = select i1 [[TMP17]], i64 [[TMP18]], i64 255
+; AVX1-NEXT: store i64 [[TMP19]], i64* [[TMP12]], align 8, [[TBAA5]]
+; AVX1-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 2
+; AVX1-NEXT: [[TMP21:%.*]] = load i64, i64* [[TMP20]], align 8, [[TBAA5]]
+; AVX1-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], [[TMP4]]
+; AVX1-NEXT: [[TMP23:%.*]] = lshr i64 [[TMP22]], 15
+; AVX1-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
+; AVX1-NEXT: [[TMP25:%.*]] = icmp ult i32 [[TMP24]], 255
+; AVX1-NEXT: [[TMP26:%.*]] = and i64 [[TMP23]], 4294967295
+; AVX1-NEXT: [[TMP27:%.*]] = select i1 [[TMP25]], i64 [[TMP26]], i64 255
+; AVX1-NEXT: store i64 [[TMP27]], i64* [[TMP20]], align 8, [[TBAA5]]
+; AVX1-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 3
+; AVX1-NEXT: [[TMP29:%.*]] = load i64, i64* [[TMP28]], align 8, [[TBAA5]]
+; AVX1-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], [[TMP4]]
+; AVX1-NEXT: [[TMP31:%.*]] = lshr i64 [[TMP30]], 15
+; AVX1-NEXT: [[TMP32:%.*]] = trunc i64 [[TMP31]] to i32
+; AVX1-NEXT: [[TMP33:%.*]] = icmp ult i32 [[TMP32]], 255
+; AVX1-NEXT: [[TMP34:%.*]] = and i64 [[TMP31]], 4294967295
+; AVX1-NEXT: [[TMP35:%.*]] = select i1 [[TMP33]], i64 [[TMP34]], i64 255
+; AVX1-NEXT: store i64 [[TMP35]], i64* [[TMP28]], align 8, [[TBAA5]]
+; AVX1-NEXT: ret void
 ;
 ; AVX2-LABEL: @store_i64(
 ; AVX2-NEXT: [[TMP4:%.*]] = zext i32 [[TMP1:%.*]] to i64