[LegalizeTypes][VP] Add splitting and widening support for VP_FNEG.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D120785
commit c392b9924e (parent eb6a3c0c0c)
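For orientation: a VP (vector-predicated) unary node such as ISD::VP_FNEG carries three operands — the source vector, a lane mask, and an explicit vector length (EVL) — whereas plain ISD::FNEG carries one. The sketch below is illustrative only (the helper name is invented, not part of this patch); it shows the node shape that the splitting and widening code in the hunks below has to handle.

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Hypothetical helper, for illustration only: build a VP_FNEG node.
// Operand order matches what the legalizer code below relies on:
//   operand 0 = value, operand 1 = mask, operand 2 = EVL.
static SDValue buildVPFNeg(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                           SDValue Val, SDValue Mask, SDValue EVL) {
  return DAG.getNode(ISD::VP_FNEG, DL, VT, {Val, Mask, EVL});
}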
@@ -995,7 +995,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::FLOG10:
   case ISD::FLOG2:
   case ISD::FNEARBYINT:
-  case ISD::FNEG:
+  case ISD::FNEG: case ISD::VP_FNEG:
   case ISD::FREEZE:
   case ISD::ARITH_FENCE:
   case ISD::FP_EXTEND:
@@ -2069,15 +2069,33 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
   else
     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
 
-  if (N->getOpcode() == ISD::FP_ROUND) {
-    Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo, N->getOperand(1),
-                     N->getFlags());
-    Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi, N->getOperand(1),
-                     N->getFlags());
-  } else {
-    Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo, N->getFlags());
-    Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi, N->getFlags());
+  const SDNodeFlags Flags = N->getFlags();
+  unsigned Opcode = N->getOpcode();
+  if (N->getNumOperands() <= 2) {
+    if (Opcode == ISD::FP_ROUND) {
+      Lo = DAG.getNode(Opcode, dl, LoVT, Lo, N->getOperand(1), Flags);
+      Hi = DAG.getNode(Opcode, dl, HiVT, Hi, N->getOperand(1), Flags);
+    } else {
+      Lo = DAG.getNode(Opcode, dl, LoVT, Lo, Flags);
+      Hi = DAG.getNode(Opcode, dl, HiVT, Hi, Flags);
+    }
+    return;
   }
+
+  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
+  assert(N->isVPOpcode() && "Expected VP opcode");
+
+  SDValue MaskLo, MaskHi;
+  std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
+
+  SDValue EVLLo, EVLHi;
+  std::tie(EVLLo, EVLHi) =
+      DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
+
+  Lo = DAG.getNode(Opcode, dl, Lo.getValueType(),
+                   {Lo, MaskLo, EVLLo}, Flags);
+  Hi = DAG.getNode(Opcode, dl, Hi.getValueType(),
+                   {Hi, MaskHi, EVLHi}, Flags);
 }
 
 void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
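The interesting part of the split is the EVL: the low half keeps at most its own number of lanes active, and the high half gets whatever remains. Below is a minimal sketch of that semantics, assuming a fixed-length vector split into two equal halves of LoElts lanes; the exact node sequence is an assumption for illustration (the patch simply calls DAG.SplitEVL, which also handles scalable vectors).

#include "llvm/CodeGen/SelectionDAG.h"
#include <utility>
using namespace llvm;

// Sketch only: the low half processes min(EVL, LoElts) lanes and the high
// half processes the rest, clamped at zero.
static std::pair<SDValue, SDValue> splitEVLSketch(SelectionDAG &DAG,
                                                  const SDLoc &DL, SDValue EVL,
                                                  unsigned LoElts) {
  EVT VT = EVL.getValueType();
  SDValue Half = DAG.getConstant(LoElts, DL, VT);
  SDValue EVLLo = DAG.getNode(ISD::UMIN, DL, VT, EVL, Half);    // min(EVL, LoElts)
  SDValue EVLHi = DAG.getNode(ISD::USUBSAT, DL, VT, EVL, Half); // max(EVL - LoElts, 0)
  return {EVLLo, EVLHi};
}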
@@ -3417,7 +3435,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::CTPOP:
   case ISD::CTTZ:
   case ISD::CTTZ_ZERO_UNDEF:
-  case ISD::FNEG:
+  case ISD::FNEG: case ISD::VP_FNEG:
   case ISD::FREEZE:
   case ISD::ARITH_FENCE:
   case ISD::FCANONICALIZE:
@@ -4028,7 +4046,16 @@ SDValue DAGTypeLegalizer::WidenVecRes_Unary(SDNode *N) {
   // Unary op widening.
   EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
   SDValue InOp = GetWidenedVector(N->getOperand(0));
-  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp);
+  if (N->getNumOperands() == 1)
+    return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp);
+
+  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
+  assert(N->isVPOpcode() && "Expected VP opcode");
+
+  SDValue Mask =
+      GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount());
+  return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT,
+                     {InOp, Mask, N->getOperand(2)});
 }
 
 SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
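For widening, only the value and the mask need to grow to the wider type; the EVL operand is passed through unchanged, since the padding lanes lie beyond EVL and are masked off anyway. The sketch below shows one plausible way a mask can be widened; it is an assumption about what GetWidenedMask amounts to, not its actual implementation.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Sketch: drop the narrow mask into an all-false wide mask so the extra
// lanes introduced by widening stay disabled.
static SDValue widenMaskSketch(SelectionDAG &DAG, const SDLoc &DL,
                               SDValue NarrowMask, EVT WideMaskVT) {
  SDValue AllFalse = DAG.getConstant(0, DL, WideMaskVT);
  SDValue Idx = DAG.getVectorIdxConstant(0, DL);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideMaskVT, AllFalse,
                     NarrowMask, Idx);
}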
@@ -268,6 +268,30 @@ define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
   ret <8 x double> %v
 }
 
+declare <15 x double> @llvm.vp.fneg.v15f64(<15 x double>, <15 x i1>, i32)
+
+define <15 x double> @vfneg_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v15f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfneg.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  ret <15 x double> %v
+}
+
+define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v15f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <15 x i1> poison, i1 true, i32 0
+  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
+  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  ret <15 x double> %v
+}
+
 declare <16 x double> @llvm.vp.fneg.v16f64(<16 x double>, <16 x i1>, i32)
 
 define <16 x double> @vfneg_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
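The <15 x double> tests exercise the widening path: type legalization picks the next legal element count, which in these tests is the next power of two, so v15f64 becomes v16f64 (and, further down, nxv7f64 becomes nxv8f64) while the mask and EVL keep the extra lane inert. A trivial restatement of that rounding, stated as an assumption about how the wider type is chosen here:

#include <bit>

// Sketch: widened element count in these tests is the next power of two,
// e.g. 7 -> 8 and 15 -> 16.
static unsigned widenedElementCount(unsigned NumElts) {
  return std::bit_ceil(NumElts);
}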
@@ -291,3 +315,57 @@ define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
   %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
+
+declare <32 x double> @llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32)
+
+define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v32f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    addi a2, a0, -16
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    bltu a0, a2, .LBB26_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfneg.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB26_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB26_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfneg.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
+
+define <32 x double> @vfneg_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_v32f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, a0, -16
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    bltu a0, a1, .LBB27_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT:    bltu a0, a1, .LBB27_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB27_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x double> %v
+}
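The <32 x double> tests exercise the splitting path: the vector is split into two 16-element halves, and the bltu/li/mv sequences around each vsetvli compute each half's vector length from the incoming EVL in a0. In scalar terms they implement the following (a restatement of the assembly above, not code from the patch):

#include <algorithm>
#include <cstdint>
#include <utility>

// EVL handed to each half when splitting a 32-element VP op into two
// 16-element halves, as computed by the branch sequences above.
static std::pair<uint32_t, uint32_t> splitEVL32(uint32_t EVL) {
  uint32_t Lo = std::min<uint32_t>(EVL, 16); // low half  (v8..v15)
  uint32_t Hi = EVL > 16 ? EVL - 16 : 0;     // high half (v16..v23)
  return {Lo, Hi};
}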
@@ -340,6 +340,30 @@ define <vscale x 4 x double> @vfneg_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
   ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfneg_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv7f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfneg.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
+define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -363,3 +387,60 @@ define <vscale x 8 x double> @vfneg_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
   %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
+
+; Test splitting.
+declare <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a4, a1, 3
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    sub a3, a0, a1
+; CHECK-NEXT:    vslidedown.vx v0, v0, a4
+; CHECK-NEXT:    bltu a0, a3, .LBB32_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a3
+; CHECK-NEXT:  .LBB32_2:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    vfneg.v v16, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB32_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a0, a1
+; CHECK-NEXT:  .LBB32_4:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vfneg.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x double> %v
+}
+
+define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfneg_vv_nxv16f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:    bltu a0, a1, .LBB33_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB33_2:
+; CHECK-NEXT:    li a3, 0
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT:    sub a1, a0, a1
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    bltu a0, a1, .LBB33_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a3, a1
+; CHECK-NEXT:  .LBB33_4:
+; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x double> %v
+}
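The scalable nxv16f64 tests perform the same split, but the per-half element count is only known at run time: each half is <vscale x 8 x double>, which on RVV holds VLEN/8 doubles, i.e. exactly the value read by `csrr a1, vlenb`. A one-line restatement of that relationship (an observation about the test, not patch code):

#include <cstdint>

// Elements per <vscale x 8 x double> half on RVV: vscale = VLEN/64 and the
// half holds vscale * 8 doubles, which equals VLEN/8, i.e. the vlenb CSR.
static uint64_t halfElemCountNxv8f64(uint64_t VLenBits) {
  return VLenBits / 8;
}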
|
||||
|
|
Loading…
Reference in New Issue