[PowerPC] Support constrained scalar sitofp/uitofp

This patch adds support for constrained scalar integer-to-floating-point
operations (strict sitofp/uitofp) on PowerPC. It also fixes exception
handling for the FCFID* instructions by marking them mayRaiseFPException.

Reviewed By: steven.zhang, uweigand

Differential Revision: https://reviews.llvm.org/D81669
Qiu Chaofan 2020-08-22 01:58:07 +08:00
parent 670063eb22
commit a5b7b8cce0
8 changed files with 463 additions and 47 deletions
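The FCFID* fix matters because an integer-to-floating-point conversion is not always exact: when the integer needs more significand bits than the destination format provides, the conversion raises the IEEE inexact exception, which on PowerPC is recorded in FPSCR. A small, self-contained C++ demonstration of that numeric fact (not part of the patch; the pragma may be ignored by some compilers, and the volatiles keep the cast at run time):

#include <cfenv>
#include <cstdio>

#pragma STDC FENV_ACCESS ON
int main() {
  std::feclearexcept(FE_ALL_EXCEPT);
  // 2^53 + 1 needs 54 significand bits; converting it to double rounds and
  // raises the IEEE "inexact" exception -- the same FPSCR status bit fcfid
  // and friends may set, hence mayRaiseFPException on those instructions.
  volatile long long big = (1LL << 53) + 1;
  volatile double d = static_cast<double>(big);
  std::printf("converted to %.17g, inexact=%d\n", static_cast<double>(d),
              std::fetestexcept(FE_INEXACT) != 0);
  return 0;
}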

@@ -221,6 +221,13 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
if (isPPC64 || Subtarget.hasFPCVT()) {
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
@@ -228,6 +235,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
} else {
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
}
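For i1 sources the strict nodes follow the existing non-strict scheme: with 64-bit support or FPCVT they are Promoted, so the boolean is widened and then handled by the normal i32/i64 path; otherwise they are Custom-lowered. An i1 conversion is always exact (the result is 0.0 or +/-1.0), so a Custom hook can materialize the value without touching the FP environment and simply pass the chain through on the strict forms. A hedged sketch of such a hook (illustrative only, assuming the surrounding SelectionDAG context; it is not the exact in-tree code):

// Lower {STRICT_,}SINT_TO_FP / UINT_TO_FP with an i1 source.
// i1 -> fp cannot raise an FP exception, so the incoming chain is unchanged.
static SDValue lowerI1ToFP(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  bool IsStrict = Op->isStrictFPOpcode();
  bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
                  Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  EVT VT = Op.getValueType();
  // Signed i1 'true' is -1, unsigned 'true' is +1.
  SDValue TrueVal = DAG.getConstantFP(IsSigned ? -1.0 : 1.0, dl, VT);
  SDValue FalseVal = DAG.getConstantFP(0.0, dl, VT);
  SDValue Sel = DAG.getNode(ISD::SELECT, dl, VT, Src, TrueVal, FalseVal);
  if (!IsStrict)
    return Sel;
  // Return both results: the value and the untouched chain.
  return DAG.getMergeValues({Sel, Op.getOperand(0)}, dl);
}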
@@ -454,6 +463,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
// PowerPC does not have [U|S]INT_TO_FP
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
}
@@ -585,6 +596,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
// They also have instructions for converting between i64 and fp.
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
@@ -594,8 +607,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
}
} else {
// PowerPC does not have FP_TO_UINT on 32-bit implementations.
if (Subtarget.hasSPE()) {
@@ -612,6 +627,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
if (Subtarget.has64BitSupport()) {
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
@@ -620,6 +637,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
@@ -1484,6 +1503,14 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "PPCISD::STRICT_FCTIDUZ";
case PPCISD::STRICT_FCTIWUZ:
return "PPCISD::STRICT_FCTIWUZ";
case PPCISD::STRICT_FCFID:
return "PPCISD::STRICT_FCFID";
case PPCISD::STRICT_FCFIDU:
return "PPCISD::STRICT_FCFIDU";
case PPCISD::STRICT_FCFIDS:
return "PPCISD::STRICT_FCFIDS";
case PPCISD::STRICT_FCFIDUS:
return "PPCISD::STRICT_FCFIDUS";
}
return nullptr;
}
@@ -7985,6 +8012,14 @@ static unsigned getPPCStrictOpcode(unsigned Opc) {
return PPCISD::STRICT_FCTIDUZ;
case PPCISD::FCTIWUZ:
return PPCISD::STRICT_FCTIWUZ;
case PPCISD::FCFID:
return PPCISD::STRICT_FCFID;
case PPCISD::FCFIDU:
return PPCISD::STRICT_FCFIDU;
case PPCISD::FCFIDS:
return PPCISD::STRICT_FCFIDS;
case PPCISD::FCFIDUS:
return PPCISD::STRICT_FCFIDUS;
}
}
@@ -8152,6 +8187,10 @@ bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
ReuseLoadInfo &RLI,
SelectionDAG &DAG,
ISD::LoadExtType ET) const {
// Conservatively skip reusing for constrained FP nodes.
if (Op->isStrictFPOpcode())
return false;
SDLoc dl(Op);
bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
(Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
@@ -8235,7 +8274,9 @@ bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
continue;
if (UI->getOpcode() != ISD::SINT_TO_FP &&
UI->getOpcode() != ISD::UINT_TO_FP)
UI->getOpcode() != ISD::UINT_TO_FP &&
UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
return true;
}
@@ -8243,8 +8284,10 @@ bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
}
static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) {
bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
const PPCSubtarget &Subtarget,
SDValue Chain = SDValue()) {
bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
SDLoc dl(Op);
// If we have FCFIDS, then use it when converting to single-precision.
// Otherwise, convert to double-precision and then round.
@@ -8252,7 +8295,13 @@ static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
: (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
return DAG.getNode(ConvOpc, dl, ConvTy, Src);
if (Op->isStrictFPOpcode()) {
if (!Chain)
Chain = Op.getOperand(0);
return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl, {ConvTy, MVT::Other},
{Chain, Src});
} else
return DAG.getNode(ConvOpc, dl, ConvTy, Src);
}
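The strict twins built here are two-result nodes, the converted value plus an outgoing chain, so every caller has to pick up result 1 and keep threading it through later chained operations. The caller-side pattern, condensed from LowerINT_TO_FP further down (same context assumed):

SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
if (Op->isStrictFPOpcode())
  Chain = FP.getValue(1);  // result 1 of the STRICT_FCFID* node is the new chain
// result 0 (FP itself) is the converted f32/f64 value in both modes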
/// Custom lowers integer to floating point conversions to use
@@ -8266,9 +8315,10 @@ SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
"Invalid floating point type as target of conversion");
assert(Subtarget.hasFPCVT() &&
"Int to FP conversions with direct moves require FPCVT");
SDValue Src = Op.getOperand(0);
SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
return convertIntToFP(Op, Mov, DAG, Subtarget);
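LowerINT_TO_FPDirectMove now accepts the strict forms as well: the integer operand (operand 1 of a strict node) is moved into a VSR with MTVSRA/MTVSRZ and handed to convertIntToFP, which takes the chain off the node since none is passed explicitly. A condensed sketch of the DAG built for a signed strict i32 -> f64 (same context assumed; this becomes the mtfprwa + xscvsxddp pairs seen in the tests below):

// STRICT_SINT_TO_FP (i32 -> f64) on a target with FPCVT and direct moves.
SDValue Chain = Op.getOperand(0);                  // FP-environment chain
SDValue Src = Op.getOperand(1);                    // the i32 source
SDValue Mov = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);  // mtvsrwa/mtfprwa
SDValue Conv = DAG.getNode(PPCISD::STRICT_FCFID, dl,
                           DAG.getVTList(MVT::f64, MVT::Other),
                           {Chain, Mov});          // xscvsxddp, chained
Chain = Conv.getValue(1);                          // keep the chain threaded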
@@ -8347,8 +8397,11 @@ SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
SDValue Src = Op.getOperand(0);
bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
bool IsStrict = Op->isStrictFPOpcode();
SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
EVT InVT = Src.getValueType();
EVT OutVT = Op.getValueType();
@@ -8467,16 +8520,16 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDValue Store =
DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(), FrameIdx));
SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(), FrameIdx));
Chain = Store;
assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
"Expected an i32 store");
RLI.Ptr = FIdx;
RLI.Chain = Store;
RLI.Chain = Chain;
RLI.MPI =
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
RLI.Alignment = Align(4);
@@ -8489,14 +8542,22 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
PPCISD::LFIWZX : PPCISD::LFIWAX,
dl, DAG.getVTList(MVT::f64, MVT::Other),
Ops, MVT::i32, MMO);
Chain = Bits.getValue(1);
} else
Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget);
SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
if (IsStrict)
Chain = FP.getValue(1);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
FP = DAG.getNode(ISD::FP_ROUND, dl,
MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
if (IsStrict)
FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {MVT::f32, MVT::Other},
{Chain, FP, DAG.getIntPtrConstant(0, dl)});
else
FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
DAG.getIntPtrConstant(0, dl));
}
return FP;
}
@@ -8518,15 +8579,16 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Src, FIdx,
SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(), FrameIdx));
Chain = Store;
assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
"Expected an i32 store");
RLI.Ptr = FIdx;
RLI.Chain = Store;
RLI.Chain = Chain;
RLI.MPI =
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
RLI.Alignment = Align(4);
@@ -8539,6 +8601,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
DAG.getVTList(MVT::f64, MVT::Other), Ops,
MVT::i32, MMO);
Chain = Ld.getValue(1);
if (ReusingLoad)
spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
} else {
@@ -8552,20 +8615,29 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// STD the extended value into the stack slot.
SDValue Store = DAG.getStore(
DAG.getEntryNode(), dl, Ext64, FIdx,
Chain, dl, Ext64, FIdx,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
Chain = Store;
// Load the value as a double.
Ld = DAG.getLoad(
MVT::f64, dl, Store, FIdx,
MVT::f64, dl, Chain, FIdx,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
Chain = Ld.getValue(1);
}
// FCFID it and return it.
SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
DAG.getIntPtrConstant(0, dl));
SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
if (IsStrict)
Chain = FP.getValue(1);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
if (IsStrict)
FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {MVT::f32, MVT::Other},
{Chain, FP, DAG.getIntPtrConstant(0, dl)});
else
FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
DAG.getIntPtrConstant(0, dl));
}
return FP;
}
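Two changes run through this hunk: the temporary stores and loads are created on the incoming chain (and update it) instead of hanging off DAG.getEntryNode(), and on targets without FPCVT the final f64 -> f32 rounding becomes a chained STRICT_FP_ROUND, since frsp can itself raise the inexact exception. The tail of the function, condensed (same context; the trailing 0 operand means the round is allowed to change the value):

SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
if (IsStrict)
  Chain = FP.getValue(1);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
  if (IsStrict)  // chained round producing {f32 value, new chain}
    FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
                     DAG.getVTList(MVT::f32, MVT::Other),
                     {Chain, FP, DAG.getIntPtrConstant(0, dl)});
  else
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
}
return FP;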
@@ -10533,6 +10605,8 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::STRICT_FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
case ISD::STRICT_UINT_TO_FP:
case ISD::STRICT_SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);

@@ -447,6 +447,12 @@ namespace llvm {
STRICT_FCTIDUZ,
STRICT_FCTIWUZ,
/// Constrained integer-to-floating-point conversion instructions.
STRICT_FCFID,
STRICT_FCFIDU,
STRICT_FCFIDS,
STRICT_FCFIDUS,
/// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
/// the GPRC input, then stores it through Ptr. Type can be either i16 or

@@ -1447,11 +1447,11 @@ def : Pat<(pre_store i64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
//
let PPC970_Unit = 3, hasSideEffects = 0,
let PPC970_Unit = 3, hasSideEffects = 0, mayRaiseFPException = 1,
Uses = [RM] in { // FPU Operations.
defm FCFID : XForm_26r<63, 846, (outs f8rc:$frD), (ins f8rc:$frB),
"fcfid", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfcfid f64:$frB))]>, isPPC64;
[(set f64:$frD, (PPCany_fcfid f64:$frB))]>, isPPC64;
defm FCTID : XForm_26r<63, 814, (outs f8rc:$frD), (ins f8rc:$frB),
"fctid", "$frD, $frB", IIC_FPGeneral,
[]>, isPPC64;
@@ -1464,13 +1464,13 @@ defm FCTIDZ : XForm_26r<63, 815, (outs f8rc:$frD), (ins f8rc:$frB),
defm FCFIDU : XForm_26r<63, 974, (outs f8rc:$frD), (ins f8rc:$frB),
"fcfidu", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfcfidu f64:$frB))]>, isPPC64;
[(set f64:$frD, (PPCany_fcfidu f64:$frB))]>, isPPC64;
defm FCFIDS : XForm_26r<59, 846, (outs f4rc:$frD), (ins f8rc:$frB),
"fcfids", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfcfids f64:$frB))]>, isPPC64;
[(set f32:$frD, (PPCany_fcfids f64:$frB))]>, isPPC64;
defm FCFIDUS : XForm_26r<59, 974, (outs f4rc:$frD), (ins f8rc:$frB),
"fcfidus", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfcfidus f64:$frB))]>, isPPC64;
[(set f32:$frD, (PPCany_fcfidus f64:$frB))]>, isPPC64;
defm FCTIDUZ : XForm_26r<63, 943, (outs f8rc:$frD), (ins f8rc:$frB),
"fctiduz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCany_fctiduz f64:$frB))]>, isPPC64;

@@ -134,6 +134,28 @@ def PPCfctiwz : SDNode<"PPCISD::FCTIWZ", SDTFPUnaryOp, []>;
def PPCfctiduz: SDNode<"PPCISD::FCTIDUZ",SDTFPUnaryOp, []>;
def PPCfctiwuz: SDNode<"PPCISD::FCTIWUZ",SDTFPUnaryOp, []>;
def PPCstrict_fcfid : SDNode<"PPCISD::STRICT_FCFID",
SDTFPUnaryOp, [SDNPHasChain]>;
def PPCstrict_fcfidu : SDNode<"PPCISD::STRICT_FCFIDU",
SDTFPUnaryOp, [SDNPHasChain]>;
def PPCstrict_fcfids : SDNode<"PPCISD::STRICT_FCFIDS",
SDTFPRoundOp, [SDNPHasChain]>;
def PPCstrict_fcfidus : SDNode<"PPCISD::STRICT_FCFIDUS",
SDTFPRoundOp, [SDNPHasChain]>;
def PPCany_fcfid : PatFrags<(ops node:$op),
[(PPCfcfid node:$op),
(PPCstrict_fcfid node:$op)]>;
def PPCany_fcfidu : PatFrags<(ops node:$op),
[(PPCfcfidu node:$op),
(PPCstrict_fcfidu node:$op)]>;
def PPCany_fcfids : PatFrags<(ops node:$op),
[(PPCfcfids node:$op),
(PPCstrict_fcfids node:$op)]>;
def PPCany_fcfidus : PatFrags<(ops node:$op),
[(PPCfcfidus node:$op),
(PPCstrict_fcfidus node:$op)]>;
def PPCcv_fp_to_uint_in_vsr:
SDNode<"PPCISD::FP_TO_UINT_IN_VSR", SDT_PPCcv_fp_to_int, []>;
def PPCcv_fp_to_sint_in_vsr:

@@ -808,11 +808,11 @@ let hasSideEffects = 0 in {
def XSCVSXDDP : XX2Form<60, 376,
(outs vsfrc:$XT), (ins vsfrc:$XB),
"xscvsxddp $XT, $XB", IIC_VecFP,
[(set f64:$XT, (PPCfcfid f64:$XB))]>;
[(set f64:$XT, (PPCany_fcfid f64:$XB))]>;
def XSCVUXDDP : XX2Form<60, 360,
(outs vsfrc:$XT), (ins vsfrc:$XB),
"xscvuxddp $XT, $XB", IIC_VecFP,
[(set f64:$XT, (PPCfcfidu f64:$XB))]>;
[(set f64:$XT, (PPCany_fcfidu f64:$XB))]>;
def XVCVDPSP : XX2Form<60, 393,
(outs vsrc:$XT), (ins vsrc:$XB),
@@ -1271,11 +1271,11 @@ let Predicates = [HasVSX, HasP8Vector] in {
def XSCVSXDSP : XX2Form<60, 312,
(outs vssrc:$XT), (ins vsfrc:$XB),
"xscvsxdsp $XT, $XB", IIC_VecFP,
[(set f32:$XT, (PPCfcfids f64:$XB))]>;
[(set f32:$XT, (PPCany_fcfids f64:$XB))]>;
def XSCVUXDSP : XX2Form<60, 296,
(outs vssrc:$XT), (ins vsfrc:$XB),
"xscvuxdsp $XT, $XB", IIC_VecFP,
[(set f32:$XT, (PPCfcfidus f64:$XB))]>;
[(set f32:$XT, (PPCany_fcfidus f64:$XB))]>;
// Conversions between vector and scalar single precision
def XSCVDPSPN : XX2Form<60, 267, (outs vsrc:$XT), (ins vssrc:$XB),
@@ -3571,25 +3571,25 @@ def : Pat<(fneg (PPCfnmsub f128:$A, f128:$B, f128:$C)),
def : Pat<(PPCfnmsub f128:$A, f128:$B, (fneg f128:$C)),
(XSNMADDQP $C, $A, $B)>;
def : Pat<(f128 (sint_to_fp i64:$src)),
def : Pat<(f128 (any_sint_to_fp i64:$src)),
(f128 (XSCVSDQP (COPY_TO_REGCLASS $src, VFRC)))>;
def : Pat<(f128 (sint_to_fp (i64 (PPCmfvsr f64:$src)))),
def : Pat<(f128 (any_sint_to_fp (i64 (PPCmfvsr f64:$src)))),
(f128 (XSCVSDQP $src))>;
def : Pat<(f128 (sint_to_fp (i32 (PPCmfvsr f64:$src)))),
def : Pat<(f128 (any_sint_to_fp (i32 (PPCmfvsr f64:$src)))),
(f128 (XSCVSDQP (VEXTSW2Ds $src)))>;
def : Pat<(f128 (uint_to_fp i64:$src)),
def : Pat<(f128 (any_uint_to_fp i64:$src)),
(f128 (XSCVUDQP (COPY_TO_REGCLASS $src, VFRC)))>;
def : Pat<(f128 (uint_to_fp (i64 (PPCmfvsr f64:$src)))),
def : Pat<(f128 (any_uint_to_fp (i64 (PPCmfvsr f64:$src)))),
(f128 (XSCVUDQP $src))>;
// Convert (Un)Signed Word -> QP.
def : Pat<(f128 (sint_to_fp i32:$src)),
def : Pat<(f128 (any_sint_to_fp i32:$src)),
(f128 (XSCVSDQP (MTVSRWA $src)))>;
def : Pat<(f128 (sint_to_fp (i32 (load xoaddr:$src)))),
def : Pat<(f128 (any_sint_to_fp (i32 (load xoaddr:$src)))),
(f128 (XSCVSDQP (LIWAX xoaddr:$src)))>;
def : Pat<(f128 (uint_to_fp i32:$src)),
def : Pat<(f128 (any_uint_to_fp i32:$src)),
(f128 (XSCVUDQP (MTVSRWZ $src)))>;
def : Pat<(f128 (uint_to_fp (i32 (load xoaddr:$src)))),
def : Pat<(f128 (any_uint_to_fp (i32 (load xoaddr:$src)))),
(f128 (XSCVUDQP (LIWZX xoaddr:$src)))>;
// Pattern for matching Vector HP -> Vector SP intrinsic. Defined as a

@@ -26,6 +26,16 @@ declare i128 @llvm.experimental.constrained.fptoui.i128.ppcf128(ppc_fp128, metad
declare i128 @llvm.experimental.constrained.fptosi.i128.f128(fp128, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i32(i32, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i64(i64, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i32(i32, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i64(i64, metadata, metadata)
define i128 @q_to_i128(fp128 %m) #0 {
; P8-LABEL: q_to_i128:
; P8: # %bb.0: # %entry
@@ -581,6 +591,162 @@ entry:
ret i32 %conv
}
define fp128 @i32_to_q(i32 signext %m) #0 {
; P8-LABEL: i32_to_q:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; P8-NEXT: std r0, 16(r1)
; P8-NEXT: stdu r1, -112(r1)
; P8-NEXT: .cfi_def_cfa_offset 112
; P8-NEXT: .cfi_offset lr, 16
; P8-NEXT: bl __floatsikf
; P8-NEXT: nop
; P8-NEXT: addi r1, r1, 112
; P8-NEXT: ld r0, 16(r1)
; P8-NEXT: mtlr r0
; P8-NEXT: blr
;
; P9-LABEL: i32_to_q:
; P9: # %bb.0: # %entry
; P9-NEXT: mtvsrwa v2, r3
; P9-NEXT: xscvsdqp v2, v2
; P9-NEXT: blr
;
; NOVSX-LABEL: i32_to_q:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: mflr r0
; NOVSX-NEXT: std r0, 16(r1)
; NOVSX-NEXT: stdu r1, -32(r1)
; NOVSX-NEXT: .cfi_def_cfa_offset 32
; NOVSX-NEXT: .cfi_offset lr, 16
; NOVSX-NEXT: bl __floatsikf
; NOVSX-NEXT: nop
; NOVSX-NEXT: addi r1, r1, 32
; NOVSX-NEXT: ld r0, 16(r1)
; NOVSX-NEXT: mtlr r0
; NOVSX-NEXT: blr
entry:
%conv = tail call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %conv
}
define fp128 @i64_to_q(i64 %m) #0 {
; P8-LABEL: i64_to_q:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; P8-NEXT: std r0, 16(r1)
; P8-NEXT: stdu r1, -112(r1)
; P8-NEXT: .cfi_def_cfa_offset 112
; P8-NEXT: .cfi_offset lr, 16
; P8-NEXT: bl __floatdikf
; P8-NEXT: nop
; P8-NEXT: addi r1, r1, 112
; P8-NEXT: ld r0, 16(r1)
; P8-NEXT: mtlr r0
; P8-NEXT: blr
;
; P9-LABEL: i64_to_q:
; P9: # %bb.0: # %entry
; P9-NEXT: mtvsrd v2, r3
; P9-NEXT: xscvsdqp v2, v2
; P9-NEXT: blr
;
; NOVSX-LABEL: i64_to_q:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: mflr r0
; NOVSX-NEXT: std r0, 16(r1)
; NOVSX-NEXT: stdu r1, -32(r1)
; NOVSX-NEXT: .cfi_def_cfa_offset 32
; NOVSX-NEXT: .cfi_offset lr, 16
; NOVSX-NEXT: bl __floatdikf
; NOVSX-NEXT: nop
; NOVSX-NEXT: addi r1, r1, 32
; NOVSX-NEXT: ld r0, 16(r1)
; NOVSX-NEXT: mtlr r0
; NOVSX-NEXT: blr
entry:
%conv = tail call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %conv
}
define fp128 @u32_to_q(i32 zeroext %m) #0 {
; P8-LABEL: u32_to_q:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; P8-NEXT: std r0, 16(r1)
; P8-NEXT: stdu r1, -112(r1)
; P8-NEXT: .cfi_def_cfa_offset 112
; P8-NEXT: .cfi_offset lr, 16
; P8-NEXT: bl __floatunsikf
; P8-NEXT: nop
; P8-NEXT: addi r1, r1, 112
; P8-NEXT: ld r0, 16(r1)
; P8-NEXT: mtlr r0
; P8-NEXT: blr
;
; P9-LABEL: u32_to_q:
; P9: # %bb.0: # %entry
; P9-NEXT: mtvsrwz v2, r3
; P9-NEXT: xscvudqp v2, v2
; P9-NEXT: blr
;
; NOVSX-LABEL: u32_to_q:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: mflr r0
; NOVSX-NEXT: std r0, 16(r1)
; NOVSX-NEXT: stdu r1, -32(r1)
; NOVSX-NEXT: .cfi_def_cfa_offset 32
; NOVSX-NEXT: .cfi_offset lr, 16
; NOVSX-NEXT: bl __floatunsikf
; NOVSX-NEXT: nop
; NOVSX-NEXT: addi r1, r1, 32
; NOVSX-NEXT: ld r0, 16(r1)
; NOVSX-NEXT: mtlr r0
; NOVSX-NEXT: blr
entry:
%conv = tail call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %conv
}
define fp128 @u64_to_q(i64 %m) #0 {
; P8-LABEL: u64_to_q:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; P8-NEXT: std r0, 16(r1)
; P8-NEXT: stdu r1, -112(r1)
; P8-NEXT: .cfi_def_cfa_offset 112
; P8-NEXT: .cfi_offset lr, 16
; P8-NEXT: bl __floatundikf
; P8-NEXT: nop
; P8-NEXT: addi r1, r1, 112
; P8-NEXT: ld r0, 16(r1)
; P8-NEXT: mtlr r0
; P8-NEXT: blr
;
; P9-LABEL: u64_to_q:
; P9: # %bb.0: # %entry
; P9-NEXT: mtvsrd v2, r3
; P9-NEXT: xscvudqp v2, v2
; P9-NEXT: blr
;
; NOVSX-LABEL: u64_to_q:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: mflr r0
; NOVSX-NEXT: std r0, 16(r1)
; NOVSX-NEXT: stdu r1, -32(r1)
; NOVSX-NEXT: .cfi_def_cfa_offset 32
; NOVSX-NEXT: .cfi_offset lr, 16
; NOVSX-NEXT: bl __floatundikf
; NOVSX-NEXT: nop
; NOVSX-NEXT: addi r1, r1, 32
; NOVSX-NEXT: ld r0, 16(r1)
; NOVSX-NEXT: mtlr r0
; NOVSX-NEXT: blr
entry:
%conv = tail call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %conv
}
define void @fptoint_nofpexcept(fp128 %m, i32* %addr1, i64* %addr2) {
; MIR-LABEL: name: fptoint_nofpexcept
; MIR: renamable $v{{[0-9]+}} = nofpexcept XSCVQPSWZ

@@ -178,4 +178,152 @@ entry:
ret i32 %conv
}
define double @i32_to_d(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: addi r4, r1, -4
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: lfiwax f0, 0, r4
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define double @i64_to_d(i64 %m) #0 {
; CHECK-LABEL: i64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define double @u32_to_d(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: addi r4, r1, -4
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: lfiwzx f0, 0, r4
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define double @u64_to_d(i64 %m) #0 {
; CHECK-LABEL: u64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define float @i32_to_f(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: addi r4, r1, -4
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: lfiwax f0, 0, r4
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
define float @i64_to_f(i64 %m) #0 {
; CHECK-LABEL: i64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
define float @u32_to_f(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: addi r4, r1, -4
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: lfiwzx f0, 0, r4
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
define float @u64_to_f(i64 %m) #0 {
; CHECK-LABEL: u64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
attributes #0 = { strictfp }
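These constrained intrinsics are what clang emits for ordinary casts once strict FP semantics are requested. A hypothetical source file and invocation that should produce calls like the ones tested above; treat the exact flags as an assumption, since strict-FP support matured over several releases:

// strict_conv.cpp -- e.g.:
//   clang++ -O2 -ffp-exception-behavior=strict -frounding-math -S -emit-llvm strict_conv.cpp
// In such a translation unit the casts below should become strictfp functions
// calling llvm.experimental.constrained.sitofp / .uitofp, in the style of the
// i64_to_d / u64_to_f tests in this file.
extern "C" double i64_to_d(long long m) { return static_cast<double>(m); }
extern "C" float u64_to_f(unsigned long long m) { return static_cast<float>(m); }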

@@ -13,7 +13,7 @@ define float @test(float %a) {
; CHECK: %2:f8rc = nofpexcept FCTIWZ killed %1, implicit $rm
; CHECK: STFIWX killed %2, $zero8, %3
; CHECK-NEXT: %4:f8rc = LFIWAX $zero8, %3 :: (load 4 from %stack.0)
; CHECK-NEXT: %5:f4rc = FCFIDS killed %4, implicit $rm
; CHECK-NEXT: %5:f4rc = nofpexcept FCFIDS killed %4, implicit $rm
; CHECK-NEXT: $f1 = COPY %5
; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $f1
@@ -22,7 +22,7 @@ define float @test(float %a) {
; CHECK-P6: %2:f8rc = nofpexcept FCTIWZ killed %1, implicit $rm
; CHECK-P6: STFIWX killed %2, $zero, %3
; CHECK-P6-NEXT: %4:f8rc = LFIWAX $zero, %3 :: (load 4 from %stack.0)
; CHECK-P6-NEXT: %5:f8rc = FCFID killed %4, implicit $rm
; CHECK-P6-NEXT: %5:f8rc = nofpexcept FCFID killed %4, implicit $rm
; CHECK-P6-NEXT: %6:f4rc = nofpexcept FRSP killed %5, implicit $rm
; CHECK-P6-NEXT: $f1 = COPY %6
; CHECK-P6-NEXT: BLR implicit $lr, implicit $rm, implicit $f1
@@ -32,7 +32,7 @@ define float @test(float %a) {
; CHECK-P6-64: %2:f8rc = nofpexcept FCTIWZ killed %1, implicit $rm
; CHECK-P6-64: STFIWX killed %2, $zero8, %3
; CHECK-P6-64-NEXT: %4:f8rc = LFIWAX $zero8, %3 :: (load 4 from %stack.0)
; CHECK-P6-64-NEXT: %5:f8rc = FCFID killed %4, implicit $rm
; CHECK-P6-64-NEXT: %5:f8rc = nofpexcept FCFID killed %4, implicit $rm
; CHECK-P6-64-NEXT: %6:f4rc = nofpexcept FRSP killed %5, implicit $rm
; CHECK-P6-64-NEXT: $f1 = COPY %6
; CHECK-P6-64-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $f1
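With FCFID/FCFIDS now marked mayRaiseFPException, this pre-existing (non-strict) test gains the nofpexcept flag on them: the IR here carries no exception constraint, so instruction selection tags each selected instance as one whose exception bits are not observed, keeping later passes free to move or delete it. A hedged sketch of how a post-ISel pass can observe that distinction (generic MachineInstr/MCInstrDesc API, not code from this patch):

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// True if MI must be treated as possibly setting FPSCR exception bits: the
// opcode is marked mayRaiseFPException and ISel did not tag this particular
// instance with the NoFPExcept ("nofpexcept") flag.
static bool mayObservablyRaiseFPException(const MachineInstr &MI) {
  return MI.getDesc().mayRaiseFPException() &&
         !MI.getFlag(MachineInstr::NoFPExcept);
}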