[DAGCombiner] Fold an AND of a masked load into a zext_masked_load

This patch folds an AND of an extending masked load and a constant build
vector into a zero-extending masked load. The fold applies when the build
vector is a splat whose value masks exactly the bottom bits of the loaded
element type (so the AND is itself a zero extension), both operands have a
single use, and the target supports the zero-extending masked load.
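
For illustration, here is a minimal standalone C++ sketch (hypothetical
helper names, not LLVM code) of why the fold is sound: ANDing an any-extended
value with the low-bit mask of the memory element type produces exactly what
a zero-extending load would.

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Model an i16->i32 any-extending load followed by the AND from the
// combined pattern: the any-extend leaves the top 16 bits unspecified
// (GarbageHigh), and the AND with the low-16-bit mask clears them.
uint32_t anyExtendThenMask(uint16_t Loaded, uint16_t GarbageHigh) {
  uint32_t AnyExt = ((uint32_t)GarbageHigh << 16) | Loaded;
  return AnyExt & 0xFFFFu;
}

// Model an i16->i32 zero-extending masked-load lane.
uint32_t zeroExtend(uint16_t Loaded) { return Loaded; }

int main() {
  // Whatever the any-extend left in the high bits, the AND zeroes them,
  // so the pair is equivalent to the zero-extending load, lane for lane.
  for (uint16_t Garbage : {0x0000, 0xDEAD, 0xFFFF})
    assert(anyExtendThenMask(0xBEEF, Garbage) == zeroExtend(0xBEEF));
  return 0;
}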

Differential Revision: https://reviews.llvm.org/D86789
Sam Tebbs 2020-09-01 15:36:47 +01:00
parent ca2227c1b3
commit 15e880a04f
2 changed files with 133 additions and 0 deletions

@@ -5283,6 +5283,31 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
      return N1;
    if (ISD::isBuildVectorAllOnes(N1.getNode()))
      return N0;

    // fold (and (masked_load) (build_vec (x, ...))) to zext_masked_load
    auto *MLoad = dyn_cast<MaskedLoadSDNode>(N0);
    auto *BVec = dyn_cast<BuildVectorSDNode>(N1);
    if (MLoad && BVec && MLoad->getExtensionType() == ISD::EXTLOAD &&
        N0.hasOneUse() && N1.hasOneUse()) {
      EVT LoadVT = MLoad->getMemoryVT();
      EVT ExtVT = VT;
      if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT, LoadVT)) {
        // For this AND to be a zero extension of the masked load, the
        // elements of the BuildVec must mask the bottom bits of the extended
        // element type.
        if (ConstantSDNode *Splat = BVec->getConstantSplatNode()) {
          TypeSize ElementSize =
              LoadVT.getVectorElementType().getScalarSizeInBits();
          if (Splat->getAPIntValue().isMask((uint64_t)ElementSize)) {
            return DAG.getMaskedLoad(
                ExtVT, SDLoc(N), MLoad->getChain(), MLoad->getBasePtr(),
                MLoad->getOffset(), MLoad->getMask(), MLoad->getPassThru(),
                LoadVT, MLoad->getMemOperand(), MLoad->getAddressingMode(),
                ISD::ZEXTLOAD, MLoad->isExpandingLoad());
          }
        }
      }
    }
  }

  // fold (and c1, c2) -> c1&c2
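
The splat check above uses APInt::isMask(N), which holds only when the value
is exactly N ones starting at bit 0. A standalone model of that predicate
(isLowBitMask is a hypothetical stand-in, not the LLVM API):

#include <cassert>
#include <cstdint>

// True iff V is exactly the mask of the low N bits (N ones from bit 0),
// mirroring what APInt::isMask checks for the splat value above.
bool isLowBitMask(uint64_t V, unsigned N) {
  uint64_t Mask = (N >= 64) ? ~0ULL : ((1ULL << N) - 1);
  return V == Mask;
}

int main() {
  assert(isLowBitMask(0xFF, 8));    // i8 memory type: splat 0xFF folds
  assert(isLowBitMask(0xFFFF, 16)); // i16 memory type: splat 0xFFFF folds
  assert(!isLowBitMask(0x7F, 8));   // would clear a loaded bit: not a zext
  assert(!isLowBitMask(0x1FF, 8));  // keeps a bit above the element: no fold
  return 0;
}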

@@ -0,0 +1,108 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs -o - %s | FileCheck %s

define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSrc, <4 x i16> %a) {
; CHECK-LABEL: foo_v4i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vpt.s32 lt, q0, zr
; CHECK-NEXT: vldrht.u32 q0, [r0]
; CHECK-NEXT: vcvt.f32.u32 q0, q0
; CHECK-NEXT: bx lr
entry:
%active.lane.mask = icmp slt <4 x i16> %a, zeroinitializer
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
%0 = uitofp <4 x i16> %wide.masked.load to <4 x float>
ret <4 x float> %0
}

define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
; CHECK-LABEL: foo_v8i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vpt.s16 lt, q0, zr
; CHECK-NEXT: vldrbt.u16 q0, [r0]
; CHECK-NEXT: vcvt.f16.u16 q0, q0
; CHECK-NEXT: bx lr
entry:
%active.lane.mask = icmp slt <8 x i8> %a, zeroinitializer
%wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
%0 = uitofp <8 x i8> %wide.masked.load to <8 x half>
ret <8 x half> %0
}

define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
; CHECK-LABEL: foo_v4i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vpt.s32 lt, q0, zr
; CHECK-NEXT: vldrbt.u32 q0, [r0]
; CHECK-NEXT: vcvt.f32.u32 q0, q0
; CHECK-NEXT: bx lr
entry:
%active.lane.mask = icmp slt <4 x i8> %a, zeroinitializer
%wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
%0 = uitofp <4 x i8> %wide.masked.load to <4 x float>
ret <4 x float> %0
}

define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
; CHECK-LABEL: foo_v4i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, lr}
; CHECK-NEXT: push {r4, r5, r6, r7, lr}
; CHECK-NEXT: .pad #4
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpt.s32 lt, q0, zr
; CHECK-NEXT: vldrwt.u32 q4, [r0]
; CHECK-NEXT: vmov.f64 d0, d8
; CHECK-NEXT: vmov.i64 q5, #0xffffffff
; CHECK-NEXT: vmov.f32 s2, s17
; CHECK-NEXT: vand q6, q0, q5
; CHECK-NEXT: vmov r0, s24
; CHECK-NEXT: vmov r1, s25
; CHECK-NEXT: bl __aeabi_ul2d
; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: mov r5, r1
; CHECK-NEXT: vmov r0, s26
; CHECK-NEXT: vmov r1, s27
; CHECK-NEXT: bl __aeabi_ul2d
; CHECK-NEXT: vmov.f64 d0, d9
; CHECK-NEXT: vmov.f32 s2, s19
; CHECK-NEXT: vand q0, q0, q5
; CHECK-NEXT: vmov d9, r0, r1
; CHECK-NEXT: vmov r2, s2
; CHECK-NEXT: vmov r3, s3
; CHECK-NEXT: vmov r6, s0
; CHECK-NEXT: vmov r7, s1
; CHECK-NEXT: vmov d8, r4, r5
; CHECK-NEXT: mov r0, r2
; CHECK-NEXT: mov r1, r3
; CHECK-NEXT: bl __aeabi_ul2d
; CHECK-NEXT: vmov d11, r0, r1
; CHECK-NEXT: mov r0, r6
; CHECK-NEXT: mov r1, r7
; CHECK-NEXT: bl __aeabi_ul2d
; CHECK-NEXT: vmov d10, r0, r1
; CHECK-NEXT: vmov q0, q4
; CHECK-NEXT: vmov q1, q5
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
entry:
%active.lane.mask = icmp slt <4 x i32> %a, zeroinitializer
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
%0 = uitofp <4 x i32> %wide.masked.load to <4 x double>
ret <4 x double> %0
}

declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)