[PowerPC] Exploit the Vector Integer Average Instructions

PowerPC has instructions that implement the semantics of this piece of code:

vector int foo(vector int m, vector int n) {
  return (m + n + 1) >> 1;
}
This patch adds the matching patterns to select these instructions.
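The patterns match the canonical DAG form rather than the literal source expression: in two's complement ~n == -n - 1, so m + n + 1 == m - ~n, and that is the shape that reaches instruction selection (visible as the xxlnor + vsub + vsr sequence in the old test output below). As a minimal, purely illustrative sketch, assuming an AltiVec-enabled compile (-maltivec) and with helper names avg_s/avg_u that are not part of the patch, both the signed and the unsigned word variants are covered:

vector signed int avg_s(vector signed int m, vector signed int n) {
  return (m + n + 1) >> 1;   // arithmetic shift, selected as vavgsw
}

vector unsigned int avg_u(vector unsigned int m, vector unsigned int n) {
  return (m + n + 1) >> 1;   // logical shift, selected as vavguw
}

Equivalent patterns handle the halfword (vavgsh/vavguh) and byte (vavgsb/vavgub) element types.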

Differential Revision: https://reviews.llvm.org/D71002
QingShan Zhang 2019-12-11 07:25:57 +00:00
parent d4345636e6
commit f99297176c
2 changed files with 142 additions and 72 deletions


@@ -261,6 +261,11 @@ def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != nullptr;
}], VSPLTISW_get_imm>;
def immEQOneV : PatLeaf<(build_vector), [{
  if (ConstantSDNode *C = cast<BuildVectorSDNode>(N)->getConstantSplatNode())
    return C->isOne();
  return false;
}]>;
//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.
@@ -1092,6 +1097,20 @@ def : Pat<(v4f32 (vselect v4i32:$vA, v4f32:$vB, v4f32:$vC)),
def : Pat<(v2f64 (vselect v2i64:$vA, v2f64:$vB, v2f64:$vC)),
(VSEL $vC, $vB, $vA)>;
// Vector Integer Average Instructions
def : Pat<(v4i32 (sra (sub v4i32:$vA, (vnot_ppc v4i32:$vB)),
                      (v4i32 (immEQOneV)))), (v4i32 (VAVGSW $vA, $vB))>;
def : Pat<(v8i16 (sra (sub v8i16:$vA, (v8i16 (bitconvert(vnot_ppc v4i32:$vB)))),
                      (v8i16 (immEQOneV)))), (v8i16 (VAVGSH $vA, $vB))>;
def : Pat<(v16i8 (sra (sub v16i8:$vA, (v16i8 (bitconvert(vnot_ppc v4i32:$vB)))),
                      (v16i8 (immEQOneV)))), (v16i8 (VAVGSB $vA, $vB))>;
def : Pat<(v4i32 (srl (sub v4i32:$vA, (vnot_ppc v4i32:$vB)),
                      (v4i32 (immEQOneV)))), (v4i32 (VAVGUW $vA, $vB))>;
def : Pat<(v8i16 (srl (sub v8i16:$vA, (v8i16 (bitconvert(vnot_ppc v4i32:$vB)))),
                      (v8i16 (immEQOneV)))), (v8i16 (VAVGUH $vA, $vB))>;
def : Pat<(v16i8 (srl (sub v16i8:$vA, (v16i8 (bitconvert(vnot_ppc v4i32:$vB)))),
                      (v16i8 (immEQOneV)))), (v16i8 (VAVGUB $vA, $vB))>;
} // end HasAltivec
def HasP8Altivec : Predicate<"PPCSubTarget->hasP8Altivec()">;


@@ -5,26 +5,17 @@
define <8 x i16> @test_v8i16(<8 x i16> %m, <8 x i16> %n) {
; CHECK-P9-LABEL: test_v8i16:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: xxlnor 34, 34, 34
; CHECK-P9-NEXT: vspltish 4, 1
; CHECK-P9-NEXT: vsubuhm 2, 3, 2
; CHECK-P9-NEXT: vsrh 2, 2, 4
; CHECK-P9-NEXT: vavguh 2, 3, 2
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v8i16:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlnor 34, 34, 34
; CHECK-P8-NEXT: vspltish 4, 1
; CHECK-P8-NEXT: vsubuhm 2, 3, 2
; CHECK-P8-NEXT: vsrh 2, 2, 4
; CHECK-P8-NEXT: vavguh 2, 3, 2
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v8i16:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: xxlnor 34, 34, 34
; CHECK-P7-NEXT: vspltish 4, 1
; CHECK-P7-NEXT: vsubuhm 2, 3, 2
; CHECK-P7-NEXT: vsrh 2, 2, 4
; CHECK-P7-NEXT: vavguh 2, 3, 2
; CHECK-P7-NEXT: blr
entry:
%add = add <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -36,26 +27,17 @@ entry:
define <8 x i16> @test_v8i16_sign(<8 x i16> %m, <8 x i16> %n) {
; CHECK-P9-LABEL: test_v8i16_sign:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: xxlnor 34, 34, 34
; CHECK-P9-NEXT: vspltish 4, 1
; CHECK-P9-NEXT: vsubuhm 2, 3, 2
; CHECK-P9-NEXT: vsrah 2, 2, 4
; CHECK-P9-NEXT: vavgsh 2, 3, 2
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v8i16_sign:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlnor 34, 34, 34
; CHECK-P8-NEXT: vspltish 4, 1
; CHECK-P8-NEXT: vsubuhm 2, 3, 2
; CHECK-P8-NEXT: vsrah 2, 2, 4
; CHECK-P8-NEXT: vavgsh 2, 3, 2
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v8i16_sign:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: xxlnor 34, 34, 34
; CHECK-P7-NEXT: vspltish 4, 1
; CHECK-P7-NEXT: vsubuhm 2, 3, 2
; CHECK-P7-NEXT: vsrah 2, 2, 4
; CHECK-P7-NEXT: vavgsh 2, 3, 2
; CHECK-P7-NEXT: blr
entry:
%add = add <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -67,26 +49,17 @@ entry:
define <4 x i32> @test_v4i32(<4 x i32> %m, <4 x i32> %n) {
; CHECK-P9-LABEL: test_v4i32:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: xxlnor 34, 34, 34
; CHECK-P9-NEXT: vspltisw 4, 1
; CHECK-P9-NEXT: vsubuwm 2, 3, 2
; CHECK-P9-NEXT: vsrw 2, 2, 4
; CHECK-P9-NEXT: vavguw 2, 3, 2
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v4i32:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlnor 34, 34, 34
; CHECK-P8-NEXT: vspltisw 4, 1
; CHECK-P8-NEXT: vsubuwm 2, 3, 2
; CHECK-P8-NEXT: vsrw 2, 2, 4
; CHECK-P8-NEXT: vavguw 2, 3, 2
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v4i32:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: xxlnor 34, 34, 34
; CHECK-P7-NEXT: vspltisw 4, 1
; CHECK-P7-NEXT: vsubuwm 2, 3, 2
; CHECK-P7-NEXT: vsrw 2, 2, 4
; CHECK-P7-NEXT: vavguw 2, 3, 2
; CHECK-P7-NEXT: blr
entry:
%add = add <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
@@ -98,26 +71,17 @@ entry:
define <4 x i32> @test_v4i32_sign(<4 x i32> %m, <4 x i32> %n) {
; CHECK-P9-LABEL: test_v4i32_sign:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: xxlnor 34, 34, 34
; CHECK-P9-NEXT: vspltisw 4, 1
; CHECK-P9-NEXT: vsubuwm 2, 3, 2
; CHECK-P9-NEXT: vsraw 2, 2, 4
; CHECK-P9-NEXT: vavgsw 2, 3, 2
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v4i32_sign:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlnor 34, 34, 34
; CHECK-P8-NEXT: vspltisw 4, 1
; CHECK-P8-NEXT: vsubuwm 2, 3, 2
; CHECK-P8-NEXT: vsraw 2, 2, 4
; CHECK-P8-NEXT: vavgsw 2, 3, 2
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v4i32_sign:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: xxlnor 34, 34, 34
; CHECK-P7-NEXT: vspltisw 4, 1
; CHECK-P7-NEXT: vsubuwm 2, 3, 2
; CHECK-P7-NEXT: vsraw 2, 2, 4
; CHECK-P7-NEXT: vavgsw 2, 3, 2
; CHECK-P7-NEXT: blr
entry:
%add = add <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
@@ -129,26 +93,17 @@ entry:
define <16 x i8> @test_v16i8(<16 x i8> %m, <16 x i8> %n) {
; CHECK-P9-LABEL: test_v16i8:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: xxlnor 34, 34, 34
; CHECK-P9-NEXT: xxspltib 36, 1
; CHECK-P9-NEXT: vsububm 2, 3, 2
; CHECK-P9-NEXT: vsrb 2, 2, 4
; CHECK-P9-NEXT: vavgub 2, 3, 2
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v16i8:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlnor 34, 34, 34
; CHECK-P8-NEXT: vspltisb 4, 1
; CHECK-P8-NEXT: vsububm 2, 3, 2
; CHECK-P8-NEXT: vsrb 2, 2, 4
; CHECK-P8-NEXT: vavgub 2, 3, 2
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v16i8:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: xxlnor 34, 34, 34
; CHECK-P7-NEXT: vspltisb 4, 1
; CHECK-P7-NEXT: vsububm 2, 3, 2
; CHECK-P7-NEXT: vsrb 2, 2, 4
; CHECK-P7-NEXT: vavgub 2, 3, 2
; CHECK-P7-NEXT: blr
entry:
%add = add <16 x i8> %m, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -160,26 +115,17 @@ entry:
define <16 x i8> @test_v16i8_sign(<16 x i8> %m, <16 x i8> %n) {
; CHECK-P9-LABEL: test_v16i8_sign:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: xxlnor 34, 34, 34
; CHECK-P9-NEXT: xxspltib 36, 1
; CHECK-P9-NEXT: vsububm 2, 3, 2
; CHECK-P9-NEXT: vsrab 2, 2, 4
; CHECK-P9-NEXT: vavgsb 2, 3, 2
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v16i8_sign:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlnor 34, 34, 34
; CHECK-P8-NEXT: vspltisb 4, 1
; CHECK-P8-NEXT: vsububm 2, 3, 2
; CHECK-P8-NEXT: vsrab 2, 2, 4
; CHECK-P8-NEXT: vavgsb 2, 3, 2
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v16i8_sign:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: xxlnor 34, 34, 34
; CHECK-P7-NEXT: vspltisb 4, 1
; CHECK-P7-NEXT: vsububm 2, 3, 2
; CHECK-P7-NEXT: vsrab 2, 2, 4
; CHECK-P7-NEXT: vavgsb 2, 3, 2
; CHECK-P7-NEXT: blr
entry:
%add = add <16 x i8> %m, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -187,3 +133,108 @@ entry:
%shr = ashr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
ret <16 x i8> %shr
}
define <8 x i16> @test_v8i16_sign_negative(<8 x i16> %m, <8 x i16> %n) {
; CHECK-P9-LABEL: test_v8i16_sign_negative:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: addis 3, 2, .LCPI6_0@toc@ha
; CHECK-P9-NEXT: addi 3, 3, .LCPI6_0@toc@l
; CHECK-P9-NEXT: vadduhm 2, 2, 3
; CHECK-P9-NEXT: lxvx 35, 0, 3
; CHECK-P9-NEXT: vadduhm 2, 2, 3
; CHECK-P9-NEXT: vspltish 3, 1
; CHECK-P9-NEXT: vsrah 2, 2, 3
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v8i16_sign_negative:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis 3, 2, .LCPI6_0@toc@ha
; CHECK-P8-NEXT: vadduhm 2, 2, 3
; CHECK-P8-NEXT: vspltish 4, 1
; CHECK-P8-NEXT: addi 3, 3, .LCPI6_0@toc@l
; CHECK-P8-NEXT: lvx 3, 0, 3
; CHECK-P8-NEXT: vadduhm 2, 2, 3
; CHECK-P8-NEXT: vsrah 2, 2, 4
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v8i16_sign_negative:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: addis 3, 2, .LCPI6_0@toc@ha
; CHECK-P7-NEXT: vadduhm 2, 2, 3
; CHECK-P7-NEXT: vspltish 4, 1
; CHECK-P7-NEXT: addi 3, 3, .LCPI6_0@toc@l
; CHECK-P7-NEXT: lvx 3, 0, 3
; CHECK-P7-NEXT: vadduhm 2, 2, 3
; CHECK-P7-NEXT: vsrah 2, 2, 4
; CHECK-P7-NEXT: blr
entry:
%add = add <8 x i16> %m, <i16 1, i16 1, i16 1, i16 -1, i16 1, i16 1, i16 1, i16 1>
%add1 = add <8 x i16> %add, %n
%shr = ashr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %shr
}
define <4 x i32> @test_v4i32_negative(<4 x i32> %m, <4 x i32> %n) {
; CHECK-P9-LABEL: test_v4i32_negative:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: xxlnor 34, 34, 34
; CHECK-P9-NEXT: vsubuwm 2, 3, 2
; CHECK-P9-NEXT: vspltisw 3, 2
; CHECK-P9-NEXT: vsrw 2, 2, 3
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v4i32_negative:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlnor 34, 34, 34
; CHECK-P8-NEXT: vspltisw 4, 2
; CHECK-P8-NEXT: vsubuwm 2, 3, 2
; CHECK-P8-NEXT: vsrw 2, 2, 4
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v4i32_negative:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: xxlnor 34, 34, 34
; CHECK-P7-NEXT: vspltisw 4, 2
; CHECK-P7-NEXT: vsubuwm 2, 3, 2
; CHECK-P7-NEXT: vsrw 2, 2, 4
; CHECK-P7-NEXT: blr
entry:
%add = add <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
%add1 = add <4 x i32> %add, %n
%shr = lshr <4 x i32> %add1, <i32 2, i32 2, i32 2, i32 2>
ret <4 x i32> %shr
}
define <4 x i32> @test_v4i32_sign_negative(<4 x i32> %m, <4 x i32> %n) {
; CHECK-P9-LABEL: test_v4i32_sign_negative:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: vadduwm 2, 2, 3
; CHECK-P9-NEXT: xxleqv 35, 35, 35
; CHECK-P9-NEXT: vadduwm 2, 2, 3
; CHECK-P9-NEXT: vspltisw 3, 1
; CHECK-P9-NEXT: vsraw 2, 2, 3
; CHECK-P9-NEXT: blr
;
; CHECK-P8-LABEL: test_v4i32_sign_negative:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxleqv 36, 36, 36
; CHECK-P8-NEXT: vadduwm 2, 2, 3
; CHECK-P8-NEXT: vspltisw 3, 1
; CHECK-P8-NEXT: vadduwm 2, 2, 4
; CHECK-P8-NEXT: vsraw 2, 2, 3
; CHECK-P8-NEXT: blr
;
; CHECK-P7-LABEL: test_v4i32_sign_negative:
; CHECK-P7: # %bb.0: # %entry
; CHECK-P7-NEXT: vspltisb 4, -1
; CHECK-P7-NEXT: vadduwm 2, 2, 3
; CHECK-P7-NEXT: vspltisw 3, 1
; CHECK-P7-NEXT: vadduwm 2, 2, 4
; CHECK-P7-NEXT: vsraw 2, 2, 3
; CHECK-P7-NEXT: blr
entry:
%add = add <4 x i32> %m, <i32 -1, i32 -1, i32 -1, i32 -1>
%add1 = add <4 x i32> %add, %n
%shr = ashr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %shr
}