From 0205be8f1213833bf545a49cb087959f89fb68d1 Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Mon, 30 Sep 2019 19:15:51 +0000
Subject: [PATCH] [NFC][InstCombine] Redundant-left-shift-input-masking: add some more undef tests

llvm-svn: 373248
---
 ...dant-left-shift-input-masking-variant-a.ll | 27 +++++++++++++++++++
 ...dant-left-shift-input-masking-variant-b.ll | 27 +++++++++++++++++++
 ...dant-left-shift-input-masking-variant-c.ll | 19 +++++++++++++
 ...dant-left-shift-input-masking-variant-d.ll | 23 ++++++++++++++++
 ...dant-left-shift-input-masking-variant-e.ll | 19 +++++++++++++
 5 files changed, 115 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll
index 5445275ad1c7..bcaf6440efc7 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll
@@ -71,6 +71,33 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
   ret <8 x i32> %t5
 }
 
+define <8 x i32> @t1_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat_undef(
+; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]],
+; CHECK-NEXT: [[T1:%.*]] = shl <8 x i32> , [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[T1]],
+; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> , [[NBITS]]
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T4]]
+; CHECK-NEXT: [[T5:%.*]] = and <8 x i32> [[TMP1]],
+; CHECK-NEXT: ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits,
+  %t1 = shl <8 x i32> , %t0
+  %t2 = add <8 x i32> %t1,
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> , %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
 define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]],
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
index 6165b5796614..faf069cfec9d 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
@@ -71,6 +71,33 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
   ret <8 x i32> %t5
 }
 
+define <8 x i32> @t1_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat_undef(
+; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]],
+; CHECK-NEXT: [[T1:%.*]] = shl <8 x i32> , [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = xor <8 x i32> [[T1]],
+; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> , [[NBITS]]
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T4]]
+; CHECK-NEXT: [[T5:%.*]] = and <8 x i32> [[TMP1]],
+; CHECK-NEXT: ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits,
+  %t1 = shl <8 x i32> , %t0
+  %t2 = xor <8 x i32> %t1,
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> , %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
 define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]],
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
index 00154bba29ce..2c6bb31ebf77 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
@@ -55,6 +55,25 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
   ret <8 x i32> %t3
 }
 
+define <8 x i32> @t1_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat_undef(
+; CHECK-NEXT: [[T0:%.*]] = lshr <8 x i32> , [[NBITS:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]],
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T2]]
+; CHECK-NEXT: [[T3:%.*]] = and <8 x i32> [[TMP1]],
+; CHECK-NEXT: ret <8 x i32> [[T3]]
+;
+  %t0 = lshr <8 x i32> , %nbits
+  %t1 = and <8 x i32> %t0, %x
+  %t2 = add <8 x i32> %nbits,
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t2)
+  %t3 = shl <8 x i32> %t1, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t3
+}
+
 define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = lshr <8 x i32> , [[NBITS:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
index 6e5cb0e91931..fcbb7eb50d5d 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
@@ -63,6 +63,29 @@ define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
   ret <8 x i32> %t4
 }
 
+define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> , [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]],
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T3]]
+; CHECK-NEXT: [[T4:%.*]] = and <8 x i32> [[TMP1]],
+; CHECK-NEXT: ret <8 x i32> [[T4]]
+;
+  %t0 = shl <8 x i32> , %nbits
+  %t1 = lshr <8 x i32> %t0, %nbits
+  %t2 = and <8 x i32> %t1, %x
+  %t3 = add <8 x i32> %nbits,
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t3)
+  %t4 = shl <8 x i32> %t2, %t3 ; shift is smaller than mask
+  ret <8 x i32> %t4
+}
+
 define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> , [[NBITS:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
index bbe35605a711..2b3fb5098aac 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
@@ -55,6 +55,25 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
   ret <8 x i32> %t3
 }
 
+define <8 x i32> @t1_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat_undef(
+; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]],
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i32> [[X]], [[T2]]
+; CHECK-NEXT: [[T3:%.*]] = and <8 x i32> [[TMP1]],
+; CHECK-NEXT: ret <8 x i32> [[T3]]
+;
+  %t0 = shl <8 x i32> %x, %nbits
+  %t1 = lshr <8 x i32> %t0, %nbits
+  %t2 = add <8 x i32> %nbits,
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t2)
+  %t3 = shl <8 x i32> %t1, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t3
+}
+
 define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
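
Not part of the patch: a minimal scalar sketch of the variant-a pattern these vector-undef tests exercise, for readers unfamiliar with the fold. The function name @sketch_variant_a is illustrative, and the splat constants (-1, 1, 32) are assumptions inferred from the test shape above; the actual tests use <8 x i32> splats with undef in some lanes.

; Mask the low bits of %x, then shift left; the mask is only partially
; redundant with the final shl, which is what InstCombine is expected to
; recognize and simplify into a shl followed by a single and.
define i32 @sketch_variant_a(i32 %x, i32 %nbits) {
  %t0 = add i32 %nbits, -1   ; nbits - 1
  %t1 = shl i32 1, %t0       ; 1 << (nbits - 1)
  %t2 = add i32 %t1, -1      ; low-bit mask of width nbits - 1
  %t3 = and i32 %t2, %x      ; mask the shift input
  %t4 = sub i32 32, %nbits   ; shift amount
  %t5 = shl i32 %t3, %t4     ; shift is smaller than the mask width implies
  ret i32 %t5
}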