[ARM,MVE] Add an InstCombine rule permitting VPNOT.

Summary:
If a user writing C code using the ACLE MVE intrinsics generates a
predicate and then complements it, then the resulting IR will use the
`pred_v2i` IR intrinsic to turn some `<n x i1>` vector into a 16-bit
integer; complement that integer; and convert back. This will generate
machine code that moves the predicate out of the `P0` register,
complements it in an integer GPR, and moves it back in again.
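
For concreteness, here is a hand-written C sketch of the kind of source that produces this pattern (not code from this patch; it assumes the standard `<arm_mve.h>` intrinsics `vctp16q` and `vaddq_m_s16`, and the function name is invented for illustration):

    #include <arm_mve.h>

    /* Add the lanes of a and b *not* covered by the VCTP predicate:
       lanes below n come from v_inactive, the remaining lanes get a + b.
       The ~p is the complement that becomes the v2i/xor/i2v round trip
       in the generated IR. */
    int16x8_t add_uncovered_lanes(int16x8_t v_inactive, int16x8_t a,
                                  int16x8_t b, uint32_t n) {
        mve_pred16_t p = vctp16q(n);   /* predicate for the low n lanes */
        return vaddq_m_s16(v_inactive, a, b, (mve_pred16_t)~p);
    }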

This InstCombine rule replaces `i2v(~v2i(x))` with a direct complement
of the original predicate vector, which we can already
instruction-select as VPNOT, the instruction that complements P0 in
place.
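
Schematically, for the 8-lane case (a hand-written IR sketch with illustrative value names, echoing the tests below):

    ; before
    %int     = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %p)
    %flipped = xor i32 %int, 65535
    %notp    = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)

    ; after
    %notp = xor <8 x i1> %p, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>

The rule accepts any xor mask whose low 16 bits are all ones, since `i2v` only consumes the bottom 16 bits of its operand (the adjacent `SimplifyDemandedBits` call relies on the same fact).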

Reviewers: ostannard, MarkMurrayARM, dmgreen

Reviewed By: dmgreen

Subscribers: kristof.beyls, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D70484
Author: Simon Tatham
Date:   2019-12-02 16:18:34 +00:00
Commit: 01aefae4a1 (parent: effcdc3a82)

3 changed files with 129 additions and 0 deletions


@@ -3329,6 +3329,19 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg))) &&
         II->getType() == ArgArg->getType())
       return replaceInstUsesWith(*II, ArgArg);
+    Constant *XorMask;
+    if (match(Arg,
+              m_Xor(m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg)),
+                    m_Constant(XorMask))) &&
+        II->getType() == ArgArg->getType()) {
+      if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
+        if (CI->getValue().trunc(16).isAllOnesValue()) {
+          auto TrueVector = Builder.CreateVectorSplat(
+              II->getType()->getVectorNumElements(), Builder.getTrue());
+          return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
+        }
+      }
+    }
     KnownBits ScalarKnown(32);
     if (SimplifyDemandedBits(II, 0, APInt::getLowBitsSet(32, 16),
                              ScalarKnown, 0))


@@ -1,5 +1,7 @@
; RUN: opt -instcombine %s | llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs -o - | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"

define arm_aapcs_vfpcc <8 x i16> @test_vpt_block(<8 x i16> %v_inactive, <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
; CHECK-LABEL: test_vpt_block:
; CHECK:       @ %bb.0: @ %entry
@@ -16,7 +18,27 @@ entry:
  ret <8 x i16> %5
}

define arm_aapcs_vfpcc <8 x i16> @test_vpnot(<8 x i16> %v, <8 x i16> %w, <8 x i16> %x, i32 %n) {
; CHECK-LABEL: test_vpnot:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vctp.16 r0
; CHECK-NEXT:    vpnot
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vaddt.i16 q0, q1, q2
; CHECK-NEXT:    bx lr
entry:
  %0 = call <8 x i1> @llvm.arm.vctp16(i32 %n)
  %1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0)
  %2 = trunc i32 %1 to i16
  %3 = xor i16 %2, -1
  %4 = zext i16 %3 to i32
  %5 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %4)
  %6 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %w, <8 x i16> %x, <8 x i1> %5, <8 x i16> %v)
  ret <8 x i16> %6
}

declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)
declare <8 x i1> @llvm.arm.vctp16(i32)


@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S -o - %s | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"

declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>)
declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>)
@@ -234,3 +236,95 @@ entry:
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
  ret <4 x i1> %vout
}

; If a predicate vector is round-tripped to an integer and back, and
; complemented while it's in integer form, we should collapse that to
; a complement of the vector itself. (Rationale: this is likely to
; allow it to be code-generated as MVE VPNOT.)
define <4 x i1> @vpnot_4(<4 x i1> %vin) {
; CHECK-LABEL: @vpnot_4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %flipped = xor i32 %int, 65535
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
  ret <4 x i1> %vout
}

define <8 x i1> @vpnot_8(<8 x i1> %vin) {
; CHECK-LABEL: @vpnot_8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
  %flipped = xor i32 %int, 65535
  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
  ret <8 x i1> %vout
}

define <16 x i1> @vpnot_16(<16 x i1> %vin) {
; CHECK-LABEL: @vpnot_16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
  %flipped = xor i32 %int, 65535
  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
  ret <16 x i1> %vout
}

; And this still works even if the i32 is narrowed to i16 and back on
; opposite sides of the xor.
define <4 x i1> @vpnot_narrow_4(<4 x i1> %vin) {
; CHECK-LABEL: @vpnot_narrow_4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %narrow = trunc i32 %int to i16
  %flipped_narrow = xor i16 %narrow, -1
  %flipped = zext i16 %flipped_narrow to i32
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
  ret <4 x i1> %vout
}

define <8 x i1> @vpnot_narrow_8(<8 x i1> %vin) {
; CHECK-LABEL: @vpnot_narrow_8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
  %narrow = trunc i32 %int to i16
  %flipped_narrow = xor i16 %narrow, -1
  %flipped = zext i16 %flipped_narrow to i32
  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
  ret <8 x i1> %vout
}

define <16 x i1> @vpnot_narrow_16(<16 x i1> %vin) {
; CHECK-LABEL: @vpnot_narrow_16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
  %narrow = trunc i32 %int to i16
  %flipped_narrow = xor i16 %narrow, -1
  %flipped = zext i16 %flipped_narrow to i32
  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
  ret <16 x i1> %vout
}