From 24ee4edee8e00bb7ad3d3cda17d02a442456ff3e Mon Sep 17 00:00:00 2001 From: Jinsong Ji Date: Mon, 6 Jan 2020 19:05:12 +0000 Subject: [PATCH] [PowerPC][NFC] Rename record instructions to use _rec suffix instead of o We use the 'o' suffix to indicate record-form instructions (as it is similar to the dot '.' in the mnemonic). This was fine before, as we did not support the XO-form. However, with https://reviews.llvm.org/D66902, we now have XO-form support. It becomes confusing to still use 'o' for record form, and it is weird to have something like 'Oo'. This patch renames all 'o' instructions to use '_rec' instead. It also renames `isDot` to `isRecordForm`. Reviewed By: #powerpc, hfinkel, nemanjai, steven.zhang, lkail Differential Revision: https://reviews.llvm.org/D70758 --- .../Target/PowerPC/AsmParser/PPCAsmParser.cpp | 88 +- llvm/lib/Target/PowerPC/P9InstrResources.td | 216 ++-- llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 77 +- llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 31 +- llvm/lib/Target/PowerPC/PPCISelLowering.h | 923 +++++++++--------- llvm/lib/Target/PowerPC/PPCInstr64Bit.td | 14 +- llvm/lib/Target/PowerPC/PPCInstrAltivec.td | 70 +- llvm/lib/Target/PowerPC/PPCInstrFormats.td | 38 +- llvm/lib/Target/PowerPC/PPCInstrHTM.td | 16 +- llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 284 +++--- llvm/lib/Target/PowerPC/PPCInstrInfo.td | 238 ++--- llvm/lib/Target/PowerPC/PPCInstrVSX.td | 20 +- llvm/lib/Target/PowerPC/PPCMIPeephole.cpp | 42 +- llvm/test/CodeGen/PowerPC/block-placement.mir | 2 +- .../convert-rr-to-ri-instrs-out-of-range.mir | 100 +- .../PowerPC/convert-rr-to-ri-instrs.mir | 170 ++-- llvm/test/CodeGen/PowerPC/fold-rlwinm.mir | 8 +- .../CodeGen/PowerPC/ifcvt-diamond-ret.mir | 4 +- .../CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll | 16 +- .../CodeGen/PowerPC/opt-sub-inst-cr0-live.mir | 2 +- .../PowerPC/peephole-miscompile-extswsli.mir | 6 +- .../CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir | 24 +- 22 files changed, 1216 insertions(+), 1173 deletions(-) diff --git a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp index aedf5b713c3f..eedbdd3aaf0f 100644 --- a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp +++ b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp @@ -800,9 +800,9 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, Inst = TmpInst; break; } - case PPC::SUBICo: { + case PPC::SUBIC_rec: { MCInst TmpInst; - TmpInst.setOpcode(PPC::ADDICo); + TmpInst.setOpcode(PPC::ADDIC_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); addNegOperand(TmpInst, Inst.getOperand(2), getContext()); @@ -810,11 +810,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::EXTLWI: - case PPC::EXTLWIo: { + case PPC::EXTLWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); int64_t B = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::EXTLWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::EXTLWI ? PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(B)); @@ -824,11 +824,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::EXTRWI: - case PPC::EXTRWIo: { + case PPC::EXTRWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); int64_t B = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::EXTRWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::EXTRWI ? 
PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(B + N)); @@ -838,11 +838,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::INSLWI: - case PPC::INSLWIo: { + case PPC::INSLWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); int64_t B = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::INSLWI? PPC::RLWIMI : PPC::RLWIMIo); + TmpInst.setOpcode(Opcode == PPC::INSLWI ? PPC::RLWIMI : PPC::RLWIMI_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); @@ -853,11 +853,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::INSRWI: - case PPC::INSRWIo: { + case PPC::INSRWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); int64_t B = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::INSRWI? PPC::RLWIMI : PPC::RLWIMIo); + TmpInst.setOpcode(Opcode == PPC::INSRWI ? PPC::RLWIMI : PPC::RLWIMI_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); @@ -868,10 +868,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::ROTRWI: - case PPC::ROTRWIo: { + case PPC::ROTRWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::ROTRWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::ROTRWI ? PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(32 - N)); @@ -881,10 +881,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::SLWI: - case PPC::SLWIo: { + case PPC::SLWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::SLWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::SLWI ? PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(N)); @@ -894,10 +894,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::SRWI: - case PPC::SRWIo: { + case PPC::SRWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::SRWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::SRWI ? PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(32 - N)); @@ -907,10 +907,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::CLRRWI: - case PPC::CLRRWIo: { + case PPC::CLRRWI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::CLRRWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::CLRRWI ? PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(0)); @@ -920,11 +920,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::CLRLSLWI: - case PPC::CLRLSLWIo: { + case PPC::CLRLSLWI_rec: { MCInst TmpInst; int64_t B = Inst.getOperand(2).getImm(); int64_t N = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::CLRLSLWI? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::CLRLSLWI ? 
PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(N)); @@ -934,11 +934,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::EXTLDI: - case PPC::EXTLDIo: { + case PPC::EXTLDI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); int64_t B = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::EXTLDI? PPC::RLDICR : PPC::RLDICRo); + TmpInst.setOpcode(Opcode == PPC::EXTLDI ? PPC::RLDICR : PPC::RLDICR_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(B)); @@ -947,11 +947,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::EXTRDI: - case PPC::EXTRDIo: { + case PPC::EXTRDI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); int64_t B = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::EXTRDI? PPC::RLDICL : PPC::RLDICLo); + TmpInst.setOpcode(Opcode == PPC::EXTRDI ? PPC::RLDICL : PPC::RLDICL_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(B + N)); @@ -960,11 +960,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::INSRDI: - case PPC::INSRDIo: { + case PPC::INSRDI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); int64_t B = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::INSRDI? PPC::RLDIMI : PPC::RLDIMIo); + TmpInst.setOpcode(Opcode == PPC::INSRDI ? PPC::RLDIMI : PPC::RLDIMI_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); @@ -974,10 +974,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::ROTRDI: - case PPC::ROTRDIo: { + case PPC::ROTRDI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::ROTRDI? PPC::RLDICL : PPC::RLDICLo); + TmpInst.setOpcode(Opcode == PPC::ROTRDI ? PPC::RLDICL : PPC::RLDICL_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(64 - N)); @@ -986,10 +986,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::SLDI: - case PPC::SLDIo: { + case PPC::SLDI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::SLDI? PPC::RLDICR : PPC::RLDICRo); + TmpInst.setOpcode(Opcode == PPC::SLDI ? PPC::RLDICR : PPC::RLDICR_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(N)); @@ -1007,10 +1007,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::SRDI: - case PPC::SRDIo: { + case PPC::SRDI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::SRDI? PPC::RLDICL : PPC::RLDICLo); + TmpInst.setOpcode(Opcode == PPC::SRDI ? PPC::RLDICL : PPC::RLDICL_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(64 - N)); @@ -1019,10 +1019,10 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::CLRRDI: - case PPC::CLRRDIo: { + case PPC::CLRRDI_rec: { MCInst TmpInst; int64_t N = Inst.getOperand(2).getImm(); - TmpInst.setOpcode(Opcode == PPC::CLRRDI? PPC::RLDICR : PPC::RLDICRo); + TmpInst.setOpcode(Opcode == PPC::CLRRDI ? 
PPC::RLDICR : PPC::RLDICR_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(0)); @@ -1031,11 +1031,11 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::CLRLSLDI: - case PPC::CLRLSLDIo: { + case PPC::CLRLSLDI_rec: { MCInst TmpInst; int64_t B = Inst.getOperand(2).getImm(); int64_t N = Inst.getOperand(3).getImm(); - TmpInst.setOpcode(Opcode == PPC::CLRLSLDI? PPC::RLDIC : PPC::RLDICo); + TmpInst.setOpcode(Opcode == PPC::CLRLSLDI ? PPC::RLDIC : PPC::RLDIC_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(N)); @@ -1044,14 +1044,14 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::RLWINMbm: - case PPC::RLWINMobm: { + case PPC::RLWINMbm_rec: { unsigned MB, ME; int64_t BM = Inst.getOperand(3).getImm(); if (!isRunOfOnes(BM, MB, ME)) break; MCInst TmpInst; - TmpInst.setOpcode(Opcode == PPC::RLWINMbm ? PPC::RLWINM : PPC::RLWINMo); + TmpInst.setOpcode(Opcode == PPC::RLWINMbm ? PPC::RLWINM : PPC::RLWINM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(Inst.getOperand(2)); @@ -1061,14 +1061,14 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::RLWIMIbm: - case PPC::RLWIMIobm: { + case PPC::RLWIMIbm_rec: { unsigned MB, ME; int64_t BM = Inst.getOperand(3).getImm(); if (!isRunOfOnes(BM, MB, ME)) break; MCInst TmpInst; - TmpInst.setOpcode(Opcode == PPC::RLWIMIbm ? PPC::RLWIMI : PPC::RLWIMIo); + TmpInst.setOpcode(Opcode == PPC::RLWIMIbm ? PPC::RLWIMI : PPC::RLWIMI_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(0)); // The tied operand. TmpInst.addOperand(Inst.getOperand(1)); @@ -1079,14 +1079,14 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, break; } case PPC::RLWNMbm: - case PPC::RLWNMobm: { + case PPC::RLWNMbm_rec: { unsigned MB, ME; int64_t BM = Inst.getOperand(3).getImm(); if (!isRunOfOnes(BM, MB, ME)) break; MCInst TmpInst; - TmpInst.setOpcode(Opcode == PPC::RLWNMbm ? PPC::RLWNM : PPC::RLWNMo); + TmpInst.setOpcode(Opcode == PPC::RLWNMbm ? PPC::RLWNM : PPC::RLWNM_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(Inst.getOperand(2)); @@ -1116,8 +1116,8 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst, case PPC::CP_PASTEx : case PPC::CP_PASTE_LAST: { MCInst TmpInst; - TmpInst.setOpcode(Opcode == PPC::CP_PASTEx ? - PPC::CP_PASTE : PPC::CP_PASTEo); + TmpInst.setOpcode(Opcode == PPC::CP_PASTEx ? PPC::CP_PASTE + : PPC::CP_PASTE_rec); TmpInst.addOperand(Inst.getOperand(0)); TmpInst.addOperand(Inst.getOperand(1)); TmpInst.addOperand(MCOperand::createImm(Opcode == PPC::CP_PASTEx ? 
0 : 1)); diff --git a/llvm/lib/Target/PowerPC/P9InstrResources.td b/llvm/lib/Target/PowerPC/P9InstrResources.td index 22e8217cc0eb..9b3d13989ee2 100644 --- a/llvm/lib/Target/PowerPC/P9InstrResources.td +++ b/llvm/lib/Target/PowerPC/P9InstrResources.td @@ -107,7 +107,7 @@ def : InstRW<[P9_ALU_3C, IP_EXEC_1C, DISP_1C], (instregex "XSMAX(C|J)?DP$"), (instregex "XSMIN(C|J)?DP$"), (instregex "XSCMP(EQ|EXP|GE|GT|O|U)DP$"), - (instregex "CNT(L|T)Z(D|W)(8)?(o)?$"), + (instregex "CNT(L|T)Z(D|W)(8)?(_rec)?$"), (instregex "POPCNT(D|W)$"), (instregex "CMPB(8)?$"), (instregex "SETB(8)?$"), @@ -130,23 +130,23 @@ def : InstRW<[P9_ALU_2C, IP_EXEC_1C, DISP_1C], (instregex "CMP(WI|LWI|W|LW)(8)?$"), (instregex "CMP(L)?D(I)?$"), (instregex "SUBF(I)?C(8)?(O)?$"), - (instregex "ANDI(S)?(8)?(o)?$"), + (instregex "ANDI(S)?(8)?(_rec)?$"), (instregex "ADDC(8)?(O)?$"), - (instregex "ADDIC(8)?(o)?$"), - (instregex "ADD(8|4)(O)?(o)?$"), - (instregex "ADD(E|ME|ZE)(8)?(O)?(o)?$"), - (instregex "SUBF(E|ME|ZE)?(8)?(O)?(o)?$"), - (instregex "NEG(8)?(O)?(o)?$"), + (instregex "ADDIC(8)?(_rec)?$"), + (instregex "ADD(8|4)(O)?(_rec)?$"), + (instregex "ADD(E|ME|ZE)(8)?(O)?(_rec)?$"), + (instregex "SUBF(E|ME|ZE)?(8)?(O)?(_rec)?$"), + (instregex "NEG(8)?(O)?(_rec)?$"), (instregex "POPCNTB$"), (instregex "ADD(I|IS)?(8)?$"), (instregex "LI(S)?(8)?$"), - (instregex "(X)?OR(I|IS)?(8)?(o)?$"), - (instregex "NAND(8)?(o)?$"), - (instregex "AND(C)?(8)?(o)?$"), - (instregex "NOR(8)?(o)?$"), - (instregex "OR(C)?(8)?(o)?$"), - (instregex "EQV(8)?(o)?$"), - (instregex "EXTS(B|H|W)(8)?(_32)?(_64)?(o)?$"), + (instregex "(X)?OR(I|IS)?(8)?(_rec)?$"), + (instregex "NAND(8)?(_rec)?$"), + (instregex "AND(C)?(8)?(_rec)?$"), + (instregex "NOR(8)?(_rec)?$"), + (instregex "OR(C)?(8)?(_rec)?$"), + (instregex "EQV(8)?(_rec)?$"), + (instregex "EXTS(B|H|W)(8)?(_32)?(_64)?(_rec)?$"), (instregex "ADD(4|8)(TLS)?(_)?$"), (instregex "NEG(8)?(O)?$"), (instregex "ADDI(S)?toc(HA|L)(8)?$"), @@ -211,8 +211,8 @@ def : InstRW<[P9_ALUE_3C, P9_ALUO_3C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C], (instregex "VABSDU(B|H|W)$"), (instregex "VADDU(B|H|W)S$"), (instregex "VAVG(S|U)(B|H|W)$"), - (instregex "VCMP(EQ|GE|GT)FP(o)?$"), - (instregex "VCMPBFP(o)?$"), + (instregex "VCMP(EQ|GE|GT)FP(_rec)?$"), + (instregex "VCMPBFP(_rec)?$"), (instregex "VC(L|T)Z(B|H|W|D)$"), (instregex "VADDS(B|H|W)S$"), (instregex "V(MIN|MAX)FP$"), @@ -233,43 +233,43 @@ def : InstRW<[P9_ALUE_3C, P9_ALUO_3C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C], VSUBUWS, VSUBCUW, VCMPGTSB, - VCMPGTSBo, + VCMPGTSB_rec, VCMPGTSD, - VCMPGTSDo, + VCMPGTSD_rec, VCMPGTSH, - VCMPGTSHo, + VCMPGTSH_rec, VCMPGTSW, - VCMPGTSWo, + VCMPGTSW_rec, VCMPGTUB, - VCMPGTUBo, + VCMPGTUB_rec, VCMPGTUD, - VCMPGTUDo, + VCMPGTUD_rec, VCMPGTUH, - VCMPGTUHo, + VCMPGTUH_rec, VCMPGTUW, - VCMPGTUWo, - VCMPNEBo, - VCMPNEHo, - VCMPNEWo, - VCMPNEZBo, - VCMPNEZHo, - VCMPNEZWo, - VCMPEQUBo, - VCMPEQUDo, - VCMPEQUHo, - VCMPEQUWo, + VCMPGTUW_rec, + VCMPNEB_rec, + VCMPNEH_rec, + VCMPNEW_rec, + VCMPNEZB_rec, + VCMPNEZH_rec, + VCMPNEZW_rec, + VCMPEQUB_rec, + VCMPEQUD_rec, + VCMPEQUH_rec, + VCMPEQUW_rec, XVCMPEQDP, - XVCMPEQDPo, + XVCMPEQDP_rec, XVCMPEQSP, - XVCMPEQSPo, + XVCMPEQSP_rec, XVCMPGEDP, - XVCMPGEDPo, + XVCMPGEDP_rec, XVCMPGESP, - XVCMPGESPo, + XVCMPGESP_rec, XVCMPGTDP, - XVCMPGTDPo, + XVCMPGTDP_rec, XVCMPGTSP, - XVCMPGTSPo, + XVCMPGTSP_rec, XVMAXDP, XVMAXSP, XVMINDP, @@ -451,14 +451,14 @@ def : InstRW<[P9_DP_7C, IP_EXEC_1C, DISP_3SLOTS_1C], def : InstRW<[P9_DP_7C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - (instregex 
"FSEL(D|S)o$") + (instregex "FSEL(D|S)_rec$") )>; // 5 Cycle Restricted DP operation and one 2 cycle ALU operation. def : InstRW<[P9_DPOpAndALUOp_7C, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - (instregex "MUL(H|L)(D|W)(U)?(O)?o$") + (instregex "MUL(H|L)(D|W)(U)?(O)?_rec$") )>; // 7 cycle Restricted DP operation and one 3 cycle ALU operation. @@ -467,18 +467,18 @@ def : InstRW<[P9_DPOpAndALUOp_7C, IP_EXEC_1C, IP_EXEC_1C, def : InstRW<[P9_DPOpAndALU2Op_10C, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - (instregex "FRI(N|P|Z|M)(D|S)o$"), - (instregex "FRE(S)?o$"), - (instregex "FADD(S)?o$"), - (instregex "FSUB(S)?o$"), - (instregex "F(N)?MSUB(S)?o$"), - (instregex "F(N)?MADD(S)?o$"), - (instregex "FCFID(U)?(S)?o$"), - (instregex "FCTID(U)?(Z)?o$"), - (instregex "FCTIW(U)?(Z)?o$"), - (instregex "FMUL(S)?o$"), - (instregex "FRSQRTE(S)?o$"), - FRSPo + (instregex "FRI(N|P|Z|M)(D|S)_rec$"), + (instregex "FRE(S)?_rec$"), + (instregex "FADD(S)?_rec$"), + (instregex "FSUB(S)?_rec$"), + (instregex "F(N)?MSUB(S)?_rec$"), + (instregex "F(N)?MADD(S)?_rec$"), + (instregex "FCFID(U)?(S)?_rec$"), + (instregex "FCTID(U)?(Z)?_rec$"), + (instregex "FCTIW(U)?(Z)?_rec$"), + (instregex "FMUL(S)?_rec$"), + (instregex "FRSQRTE(S)?_rec$"), + FRSP_rec )>; // 7 cycle DP operation. One DP unit, one EXEC pipeline and 1 dispatch units. @@ -613,16 +613,16 @@ def : InstRW<[P9_PM_3C, IP_EXECO_1C, IP_EXECE_1C, DISP_1C], XSCMPUQP, XSTSTDCQP, XSXSIGQP, - BCDCFNo, - BCDCFZo, - BCDCPSGNo, - BCDCTNo, - BCDCTZo, - BCDSETSGNo, - BCDSo, - BCDTRUNCo, - BCDUSo, - BCDUTRUNCo + BCDCFN_rec, + BCDCFZ_rec, + BCDCPSGN_rec, + BCDCTN_rec, + BCDCTZ_rec, + BCDSETSGN_rec, + BCDS_rec, + BCDTRUNC_rec, + BCDUS_rec, + BCDUTRUNC_rec )>; // 12 Cycle DFU operation. Only one DFU unit per CPU so we use a whole @@ -630,7 +630,7 @@ def : InstRW<[P9_PM_3C, IP_EXECO_1C, IP_EXECE_1C, DISP_1C], // dispatch. def : InstRW<[P9_DFU_12C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C], (instrs - BCDSRo, + BCDSR_rec, XSADDQP, XSADDQPO, XSCVDPQP, @@ -654,7 +654,7 @@ def : InstRW<[P9_DFU_12C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C], // dispatch. def : InstRW<[P9_DFU_23C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C], (instrs - BCDCTSQo + BCDCTSQ_rec )>; // 24 Cycle DFU operation. Only one DFU unit per CPU so we use a whole @@ -679,7 +679,7 @@ def : InstRW<[P9_DFU_24C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C], // dispatch. def : InstRW<[P9_DFU_37C, IP_EXECE_1C, IP_EXECO_1C, DISP_1C], (instrs - BCDCFSQo + BCDCFSQ_rec )>; // 58 Cycle DFU operation. Only one DFU unit per CPU so we use a whole @@ -819,7 +819,7 @@ def : InstRW<[P9_LoadAndALUOp_6C, IP_EXEC_1C, IP_AGEN_1C, DISP_1C, DISP_1C], (instrs (instregex "LHA(X)?(8)?$"), - (instregex "CP_PASTE(8)?o$"), + (instregex "CP_PASTE(8)?_rec$"), (instregex "LWA(X)?(_32)?$"), TCHECK )>; @@ -987,7 +987,7 @@ def : InstRW<[P9_DIV_40C_8, IP_EXECO_1C, IP_EXECE_1C, DISP_EVEN_1C], def : InstRW<[P9_IntDivAndALUOp_18C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C, DISP_EVEN_1C, DISP_1C], (instrs - (instregex "DIVW(U)?(O)?o$") + (instregex "DIVW(U)?(O)?_rec$") )>; // Cracked DIV and ALU operation. 
Requires one full slice for the ALU operation @@ -996,14 +996,14 @@ def : InstRW<[P9_IntDivAndALUOp_18C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C, def : InstRW<[P9_IntDivAndALUOp_26C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C, DISP_EVEN_1C, DISP_1C], (instrs - DIVDo, - DIVDOo, - DIVDUo, - DIVDUOo, - DIVWEo, - DIVWEOo, - DIVWEUo, - DIVWEUOo + DIVD_rec, + DIVDO_rec, + DIVDU_rec, + DIVDUO_rec, + DIVWE_rec, + DIVWEO_rec, + DIVWEU_rec, + DIVWEUO_rec )>; // Cracked DIV and ALU operation. Requires one full slice for the ALU operation @@ -1012,10 +1012,10 @@ def : InstRW<[P9_IntDivAndALUOp_26C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C, def : InstRW<[P9_IntDivAndALUOp_42C_8, IP_EXECE_1C, IP_EXECO_1C, IP_EXEC_1C, DISP_EVEN_1C, DISP_1C], (instrs - DIVDEo, - DIVDEOo, - DIVDEUo, - DIVDEUOo + DIVDE_rec, + DIVDEO_rec, + DIVDEU_rec, + DIVDEUO_rec )>; // CR access instructions in _BrMCR, IIC_BrMCRX. @@ -1040,8 +1040,8 @@ def : InstRW<[P9_ALU_2C, P9_ALU_2C, IP_EXEC_1C, IP_EXEC_1C, def : InstRW<[P9_ALU_2C, P9_ALU_2C, IP_EXEC_1C, IP_EXEC_1C, DISP_1C, DISP_1C], (instrs - (instregex "ADDC(8)?(O)?o$"), - (instregex "SUBFC(8)?(O)?o$") + (instregex "ADDC(8)?(O)?_rec$"), + (instregex "SUBFC(8)?(O)?_rec$") )>; // Cracked ALU operations. @@ -1052,10 +1052,10 @@ def : InstRW<[P9_ALU_2C, P9_ALU_2C, IP_EXEC_1C, IP_EXEC_1C, def : InstRW<[P9_ALU_2C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - (instregex "F(N)?ABS(D|S)o$"), - (instregex "FCPSGN(D|S)o$"), - (instregex "FNEG(D|S)o$"), - FMRo + (instregex "F(N)?ABS(D|S)_rec$"), + (instregex "FCPSGN(D|S)_rec$"), + (instregex "FNEG(D|S)_rec$"), + FMR_rec )>; // Cracked ALU operations. @@ -1077,8 +1077,8 @@ def : InstRW<[P9_ALU_3C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C, def : InstRW<[P9_ALU_3C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_3SLOTS_1C], (instrs - (instregex "MTFSF(b|o)?$"), - (instregex "MTFSFI(o)?$") + (instregex "MTFSF(b|_rec)?$"), + (instregex "MTFSFI(_rec)?$") )>; // Cracked instruction made of two ALU ops. @@ -1087,13 +1087,13 @@ def : InstRW<[P9_ALU_3C, P9_ALU_3C, IP_EXEC_1C, IP_EXEC_1C, def : InstRW<[P9_ALUOpAndALUOp_4C, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - (instregex "RLD(I)?C(R|L)o$"), - (instregex "RLW(IMI|INM|NM)(8)?o$"), - (instregex "SLW(8)?o$"), - (instregex "SRAW(I)?o$"), - (instregex "SRW(8)?o$"), - RLDICL_32o, - RLDIMIo + (instregex "RLD(I)?C(R|L)_rec$"), + (instregex "RLW(IMI|INM|NM)(8)?_rec$"), + (instregex "SLW(8)?_rec$"), + (instregex "SRAW(I)?_rec$"), + (instregex "SRW(8)?_rec$"), + RLDICL_32_rec, + RLDIMI_rec )>; // Cracked instruction made of two ALU ops. @@ -1102,7 +1102,7 @@ def : InstRW<[P9_ALUOpAndALUOp_4C, IP_EXEC_1C, IP_EXEC_1C, def : InstRW<[P9_ALU2OpAndALU2Op_6C, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_3SLOTS_1C], (instrs - (instregex "MFFS(L|CE|o)?$") + (instregex "MFFS(L|CE|_rec)?$") )>; // Cracked ALU instruction composed of three consecutive 2 cycle loads for a @@ -1118,12 +1118,12 @@ def : InstRW<[P9_ALUOpAndALUOpAndALUOp_6C, IP_EXEC_1C, IP_EXEC_1C, IP_EXEC_1C, // The two ops cannot be done in parallel. def : InstRW<[P9_ALUOpAndALUOp_4C, IP_EXEC_1C, IP_EXEC_1C, DISP_1C, DISP_1C], (instrs - (instregex "EXTSWSLI_32_64o$"), - (instregex "SRAD(I)?o$"), - EXTSWSLIo, - SLDo, - SRDo, - RLDICo + (instregex "EXTSWSLI_32_64_rec$"), + (instregex "SRAD(I)?_rec$"), + EXTSWSLI_rec, + SLD_rec, + SRD_rec, + RLDIC_rec )>; // 33 Cycle DP Instruction Restricted. Takes one slice and 3 dispatches. 
@@ -1136,7 +1136,7 @@ def : InstRW<[P9_DP_33C_8, IP_EXEC_1C, DISP_3SLOTS_1C], def : InstRW<[P9_DPOpAndALU2Op_36C_8, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - FDIVo + FDIV_rec )>; // 36 Cycle DP Instruction. @@ -1170,7 +1170,7 @@ def : InstRW<[P9_DPE_27C_10, P9_DPO_27C_10, IP_EXECE_1C, IP_EXECO_1C, def : InstRW<[P9_DPOpAndALU2Op_39C_10, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - FSQRTo + FSQRT_rec )>; // 26 Cycle DP Instruction. @@ -1189,7 +1189,7 @@ def : InstRW<[P9_DP_26C_5, IP_EXEC_1C, DISP_3SLOTS_1C], def : InstRW<[P9_DPOpAndALU2Op_29C_5, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - FSQRTSo + FSQRTS_rec )>; // 33 Cycle DP Instruction. Takes one slice and 1 dispatch. @@ -1208,7 +1208,7 @@ def : InstRW<[P9_DP_22C_5, IP_EXEC_1C, DISP_3SLOTS_1C], def : InstRW<[P9_DPOpAndALU2Op_25C_5, IP_EXEC_1C, IP_EXEC_1C, DISP_3SLOTS_1C, DISP_1C], (instrs - FDIVSo + FDIVS_rec )>; // 22 Cycle DP Instruction. Takes one slice and 1 dispatch. @@ -1415,7 +1415,7 @@ def : InstRW<[], MBAR, MSYNC, SLBSYNC, - SLBFEEo, + SLBFEE_rec, NAP, STOP, TRAP, diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 07ea66237d6f..41042e26f3fd 100644 --- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -1812,11 +1812,14 @@ class BitPermutationSelector { SDValue ANDIVal, ANDISVal; if (ANDIMask != 0) - ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32, - VRot, getI32Imm(ANDIMask, dl)), 0); + ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI_rec, dl, MVT::i32, + VRot, getI32Imm(ANDIMask, dl)), + 0); if (ANDISMask != 0) - ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32, - VRot, getI32Imm(ANDISMask, dl)), 0); + ANDISVal = + SDValue(CurDAG->getMachineNode(PPC::ANDIS_rec, dl, MVT::i32, VRot, + getI32Imm(ANDISMask, dl)), + 0); SDValue TotalVal; if (!ANDIVal) @@ -1905,11 +1908,14 @@ class BitPermutationSelector { SDValue ANDIVal, ANDISVal; if (ANDIMask != 0) - ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32, - Res, getI32Imm(ANDIMask, dl)), 0); + ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI_rec, dl, MVT::i32, + Res, getI32Imm(ANDIMask, dl)), + 0); if (ANDISMask != 0) - ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32, - Res, getI32Imm(ANDISMask, dl)), 0); + ANDISVal = + SDValue(CurDAG->getMachineNode(PPC::ANDIS_rec, dl, MVT::i32, Res, + getI32Imm(ANDISMask, dl)), + 0); if (!ANDIVal) Res = ANDISVal; @@ -2182,15 +2188,16 @@ class BitPermutationSelector { SDValue ANDIVal, ANDISVal; if (ANDIMask != 0) - ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8o, dl, MVT::i64, + ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8_rec, dl, MVT::i64, ExtendToInt64(VRot, dl), getI32Imm(ANDIMask, dl)), 0); if (ANDISMask != 0) - ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDIS8o, dl, MVT::i64, - ExtendToInt64(VRot, dl), - getI32Imm(ANDISMask, dl)), - 0); + ANDISVal = + SDValue(CurDAG->getMachineNode(PPC::ANDIS8_rec, dl, MVT::i64, + ExtendToInt64(VRot, dl), + getI32Imm(ANDISMask, dl)), + 0); if (!ANDIVal) TotalVal = ANDISVal; @@ -2331,11 +2338,16 @@ class BitPermutationSelector { SDValue ANDIVal, ANDISVal; if (ANDIMask != 0) - ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8o, dl, MVT::i64, - ExtendToInt64(Res, dl), getI32Imm(ANDIMask, dl)), 0); + ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8_rec, dl, MVT::i64, + ExtendToInt64(Res, dl), + getI32Imm(ANDIMask, dl)), + 0); if (ANDISMask != 0) - ANDISVal = 
SDValue(CurDAG->getMachineNode(PPC::ANDIS8o, dl, MVT::i64, - ExtendToInt64(Res, dl), getI32Imm(ANDISMask, dl)), 0); + ANDISVal = + SDValue(CurDAG->getMachineNode(PPC::ANDIS8_rec, dl, MVT::i64, + ExtendToInt64(Res, dl), + getI32Imm(ANDISMask, dl)), + 0); if (!ANDIVal) Res = ANDISVal; @@ -2624,8 +2636,9 @@ SDNode *IntegerCompareEliminator::tryLogicOpOfCompares(SDNode *N) { assert((NewOpc != -1 || !IsBitwiseNegate) && "No record form available for AND8/OR8/XOR8?"); WideOp = - SDValue(CurDAG->getMachineNode(NewOpc == -1 ? PPC::ANDI8o : NewOpc, dl, - MVT::i64, MVT::Glue, LHS, RHS), 0); + SDValue(CurDAG->getMachineNode(NewOpc == -1 ? PPC::ANDI8_rec : NewOpc, + dl, MVT::i64, MVT::Glue, LHS, RHS), + 0); } // Select this node to a single bit from CR0 set by the record-form node @@ -4820,24 +4833,24 @@ void PPCDAGToDAGISel::Select(SDNode *N) { break; } // FIXME: Remove this once the ANDI glue bug is fixed: - case PPCISD::ANDIo_1_EQ_BIT: - case PPCISD::ANDIo_1_GT_BIT: { + case PPCISD::ANDI_rec_1_EQ_BIT: + case PPCISD::ANDI_rec_1_GT_BIT: { if (!ANDIGlueBug) break; EVT InVT = N->getOperand(0).getValueType(); assert((InVT == MVT::i64 || InVT == MVT::i32) && - "Invalid input type for ANDIo_1_EQ_BIT"); + "Invalid input type for ANDI_rec_1_EQ_BIT"); - unsigned Opcode = (InVT == MVT::i64) ? PPC::ANDI8o : PPC::ANDIo; + unsigned Opcode = (InVT == MVT::i64) ? PPC::ANDI8_rec : PPC::ANDI_rec; SDValue AndI(CurDAG->getMachineNode(Opcode, dl, InVT, MVT::Glue, N->getOperand(0), CurDAG->getTargetConstant(1, dl, InVT)), 0); SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32); - SDValue SRIdxVal = - CurDAG->getTargetConstant(N->getOpcode() == PPCISD::ANDIo_1_EQ_BIT ? - PPC::sub_eq : PPC::sub_gt, dl, MVT::i32); + SDValue SRIdxVal = CurDAG->getTargetConstant( + N->getOpcode() == PPCISD::ANDI_rec_1_EQ_BIT ? PPC::sub_eq : PPC::sub_gt, + dl, MVT::i32); CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1, CR0Reg, SRIdxVal, SDValue(AndI.getNode(), 1) /* glue */); @@ -6222,8 +6235,8 @@ static bool PeepholePPC64ZExtGather(SDValue Op32, // For ANDI and ANDIS, the higher-order bits are zero if either that is true // of the first operand, or if the second operand is positive (so that it is // not sign extended). 
- if (Op32.getMachineOpcode() == PPC::ANDIo || - Op32.getMachineOpcode() == PPC::ANDISo) { + if (Op32.getMachineOpcode() == PPC::ANDI_rec || + Op32.getMachineOpcode() == PPC::ANDIS_rec) { SmallPtrSet ToPromote1; bool Op0OK = PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1); @@ -6345,8 +6358,12 @@ void PPCDAGToDAGISel::PeepholePPC64ZExt() { case PPC::ORI: NewOpcode = PPC::ORI8; break; case PPC::ORIS: NewOpcode = PPC::ORIS8; break; case PPC::AND: NewOpcode = PPC::AND8; break; - case PPC::ANDIo: NewOpcode = PPC::ANDI8o; break; - case PPC::ANDISo: NewOpcode = PPC::ANDIS8o; break; + case PPC::ANDI_rec: + NewOpcode = PPC::ANDI8_rec; + break; + case PPC::ANDIS_rec: + NewOpcode = PPC::ANDIS8_rec; + break; } // Note: During the replacement process, the nodes will be in an diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index cebbd880569d..a82de6cab168 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1394,8 +1394,10 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; - case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT"; - case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT"; + case PPCISD::ANDI_rec_1_EQ_BIT: + return "PPCISD::ANDI_rec_1_EQ_BIT"; + case PPCISD::ANDI_rec_1_GT_BIT: + return "PPCISD::ANDI_rec_1_GT_BIT"; case PPCISD::VCMP: return "PPCISD::VCMP"; case PPCISD::VCMPo: return "PPCISD::VCMPo"; case PPCISD::LBRX: return "PPCISD::LBRX"; @@ -7393,8 +7395,7 @@ SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { "Custom lowering only for i1 results"); SDLoc DL(Op); - return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, - Op.getOperand(0)); + return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0)); } SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op, @@ -11663,20 +11664,20 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, // Restore FPSCR value. BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); - } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || - MI.getOpcode() == PPC::ANDIo_1_GT_BIT || - MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || - MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { - unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || - MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) - ? PPC::ANDI8o - : PPC::ANDIo; - bool IsEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || - MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); + } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT || + MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT || + MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 || + MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) { + unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 || + MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) + ? PPC::ANDI8_rec + : PPC::ANDI_rec; + bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT || + MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8); MachineRegisterInfo &RegInfo = F->getRegInfo(); Register Dest = RegInfo.createVirtualRegister( - Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass); + Opcode == PPC::ANDI_rec ? 
&PPC::GPRCRegClass : &PPC::G8RCRegClass); DebugLoc Dl = MI.getDebugLoc(); BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest) diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h index 57db15f3f423..774cc353b91a 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -43,460 +43,475 @@ namespace llvm { // that come before it. For example, ADD or MUL should be placed before // the ISD::FIRST_TARGET_MEMORY_OPCODE while a LOAD or STORE should come // after it. - enum NodeType : unsigned { - // Start the numbering where the builtin ops and target ops leave off. - FIRST_NUMBER = ISD::BUILTIN_OP_END, - - /// FSEL - Traditional three-operand fsel node. - /// - FSEL, - - /// XSMAXCDP, XSMINCDP - C-type min/max instructions. - XSMAXCDP, XSMINCDP, - - /// FCFID - The FCFID instruction, taking an f64 operand and producing - /// and f64 value containing the FP representation of the integer that - /// was temporarily in the f64 operand. - FCFID, - - /// Newer FCFID[US] integer-to-floating-point conversion instructions for - /// unsigned integers and single-precision outputs. - FCFIDU, FCFIDS, FCFIDUS, - - /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 - /// operand, producing an f64 value containing the integer representation - /// of that FP value. - FCTIDZ, FCTIWZ, - - /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for - /// unsigned integers with round toward zero. - FCTIDUZ, FCTIWUZ, - - /// Floating-point-to-interger conversion instructions - FP_TO_UINT_IN_VSR, FP_TO_SINT_IN_VSR, - - /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in - /// VSFRC that is sign-extended from ByteWidth to a 64-byte integer. - VEXTS, - - /// SExtVElems, takes an input vector of a smaller type and sign - /// extends to an output vector of a larger type. - SExtVElems, - - /// Reciprocal estimate instructions (unary FP ops). - FRE, FRSQRTE, - - // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking - // three v4f32 operands and producing a v4f32 result. - VMADDFP, VNMSUBFP, - - /// VPERM - The PPC VPERM Instruction. - /// - VPERM, - - /// XXSPLT - The PPC VSX splat instructions - /// - XXSPLT, - - /// VECINSERT - The PPC vector insert instruction - /// - VECINSERT, - - /// VECSHL - The PPC vector shift left instruction - /// - VECSHL, - - /// XXPERMDI - The PPC XXPERMDI instruction - /// - XXPERMDI, - - /// The CMPB instruction (takes two operands of i32 or i64). - CMPB, - - /// Hi/Lo - These represent the high and low 16-bit parts of a global - /// address respectively. These nodes have two operands, the first of - /// which must be a TargetGlobalAddress, and the second of which must be a - /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C', - /// though these are usually folded into other nodes. - Hi, Lo, - - /// The following two target-specific nodes are used for calls through - /// function pointers in the 64-bit SVR4 ABI. - - /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX) - /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to - /// compute an allocation on the stack. - DYNALLOC, - - /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to - /// compute an offset from native SP to the address of the most recent - /// dynamic alloca. - DYNAREAOFFSET, - - /// GlobalBaseReg - On Darwin, this node represents the result of the mflr - /// at function entry, used for PIC code. 
- GlobalBaseReg, - - /// These nodes represent PPC shifts. - /// - /// For scalar types, only the last `n + 1` bits of the shift amounts - /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc. - /// for exact behaviors. - /// - /// For vector types, only the last n bits are used. See vsld. - SRL, SRA, SHL, - - /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign - /// word and shift left immediate. - EXTSWSLI, - - /// The combination of sra[wd]i and addze used to implemented signed - /// integer division by a power of 2. The first operand is the dividend, - /// and the second is the constant shift amount (representing the - /// divisor). - SRA_ADDZE, - - /// CALL - A direct function call. - /// CALL_NOP is a call with the special NOP which follows 64-bit - /// SVR4 calls and 32-bit/64-bit AIX calls. - CALL, CALL_NOP, - - /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a - /// MTCTR instruction. - MTCTR, - - /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a - /// BCTRL instruction. - BCTRL, - - /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl - /// instruction and the TOC reload required on 64-bit ELF, 32-bit AIX - /// and 64-bit AIX. - BCTRL_LOAD_TOC, - - /// Return with a flag operand, matched by 'blr' - RET_FLAG, - - /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction. - /// This copies the bits corresponding to the specified CRREG into the - /// resultant GPR. Bits corresponding to other CR regs are undefined. - MFOCRF, - - /// Direct move from a VSX register to a GPR - MFVSR, - - /// Direct move from a GPR to a VSX register (algebraic) - MTVSRA, - - /// Direct move from a GPR to a VSX register (zero) - MTVSRZ, - - /// Direct move of 2 consecutive GPR to a VSX register. - BUILD_FP128, - - /// BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and - /// EXTRACT_ELEMENT but take f64 arguments instead of i64, as i64 is - /// unsupported for this target. - /// Merge 2 GPRs to a single SPE register. - BUILD_SPE64, - - /// Extract SPE register component, second argument is high or low. - EXTRACT_SPE, - - /// Extract a subvector from signed integer vector and convert to FP. - /// It is primarily used to convert a (widened) illegal integer vector - /// type to a legal floating point vector type. - /// For example v2i32 -> widened to v4i32 -> v2f64 - SINT_VEC_TO_FP, - - /// Extract a subvector from unsigned integer vector and convert to FP. - /// As with SINT_VEC_TO_FP, used for converting illegal types. - UINT_VEC_TO_FP, - - // FIXME: Remove these once the ANDI glue bug is fixed: - /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the - /// eq or gt bit of CR0 after executing andi. x, 1. This is used to - /// implement truncation of i32 or i64 to i1. - ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT, - - // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit - // target (returns (Lo, Hi)). It takes a chain operand. - READ_TIME_BASE, - - // EH_SJLJ_SETJMP - SjLj exception handling setjmp. - EH_SJLJ_SETJMP, - - // EH_SJLJ_LONGJMP - SjLj exception handling longjmp. - EH_SJLJ_LONGJMP, - - /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* - /// instructions. For lack of better number, we use the opcode number - /// encoding for the OPC field to identify the compare. For example, 838 - /// is VCMPGTSH. - VCMP, - - /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the - /// altivec VCMP*o instructions. 
For lack of better number, we use the - /// opcode number encoding for the OPC field to identify the compare. For - /// example, 838 is VCMPGTSH. - VCMPo, - - /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This - /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the - /// condition register to branch on, OPC is the branch opcode to use (e.g. - /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is - /// an optional input flag argument. - COND_BRANCH, - - /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based - /// loops. - BDNZ, BDZ, - - /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding - /// towards zero. Used only as part of the long double-to-int - /// conversion sequence. - FADDRTZ, - - /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register. - MFFS, - - /// TC_RETURN - A tail call return. - /// operand #0 chain - /// operand #1 callee (register or absolute) - /// operand #2 stack adjustment - /// operand #3 optional in flag - TC_RETURN, - - /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls - CR6SET, - CR6UNSET, - - /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS - /// for non-position independent code on PPC32. - PPC32_GOT, - - /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and - /// local dynamic TLS and position indendepent code on PPC32. - PPC32_PICGOT, - - /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec - /// TLS model, produces an ADDIS8 instruction that adds the GOT - /// base to sym\@got\@tprel\@ha. - ADDIS_GOT_TPREL_HA, - - /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec - /// TLS model, produces a LD instruction with base register G8RReg - /// and offset sym\@got\@tprel\@l. This completes the addition that - /// finds the offset of "sym" relative to the thread pointer. - LD_GOT_TPREL_L, - - /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS - /// model, produces an ADD instruction that adds the contents of - /// G8RReg to the thread pointer. Symbol contains a relocation - /// sym\@tls which is to be replaced by the thread pointer and - /// identifies to the linker that the instruction is part of a - /// TLS sequence. - ADD_TLS, - - /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS - /// model, produces an ADDIS8 instruction that adds the GOT base - /// register to sym\@got\@tlsgd\@ha. - ADDIS_TLSGD_HA, - - /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS - /// model, produces an ADDI8 instruction that adds G8RReg to - /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by - /// ADDIS_TLSGD_L_ADDR until after register assignment. - ADDI_TLSGD_L, - - /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS - /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by - /// ADDIS_TLSGD_L_ADDR until after register assignment. - GET_TLS_ADDR, - - /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that - /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following - /// register assignment. - ADDI_TLSGD_L_ADDR, - - /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS - /// model, produces an ADDIS8 instruction that adds the GOT base - /// register to sym\@got\@tlsld\@ha. - ADDIS_TLSLD_HA, - - /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS - /// model, produces an ADDI8 instruction that adds G8RReg to - /// sym\@got\@tlsld\@l and stores the result in X3. 
Hidden by - /// ADDIS_TLSLD_L_ADDR until after register assignment. - ADDI_TLSLD_L, - - /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS - /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by - /// ADDIS_TLSLD_L_ADDR until after register assignment. - GET_TLSLD_ADDR, - - /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that - /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion - /// following register assignment. - ADDI_TLSLD_L_ADDR, - - /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS - /// model, produces an ADDIS8 instruction that adds X3 to - /// sym\@dtprel\@ha. - ADDIS_DTPREL_HA, - - /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS - /// model, produces an ADDI8 instruction that adds G8RReg to - /// sym\@got\@dtprel\@l. - ADDI_DTPREL_L, - - /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded - /// during instruction selection to optimize a BUILD_VECTOR into - /// operations on splats. This is necessary to avoid losing these - /// optimizations due to constant folding. - VADD_SPLAT, - - /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned - /// operand identifies the operating system entry point. - SC, - - /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer. - CLRBHRB, - - /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch - /// history rolling buffer entry. - MFBHRBE, - - /// CHAIN = RFEBB CHAIN, State - Return from event-based branch. - RFEBB, - - /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little - /// endian. Maps to an xxswapd instruction that corrects an lxvd2x - /// or stxvd2x instruction. The chain is necessary because the - /// sequence replaces a load and needs to provide the same number - /// of outputs. - XXSWAPD, - - /// An SDNode for swaps that are not associated with any loads/stores - /// and thereby have no chain. - SWAP_NO_CHAIN, - - /// An SDNode for Power9 vector absolute value difference. - /// operand #0 vector - /// operand #1 vector - /// operand #2 constant i32 0 or 1, to indicate whether needs to patch - /// the most significant bit for signed i32 - /// - /// Power9 VABSD* instructions are designed to support unsigned integer - /// vectors (byte/halfword/word), if we want to make use of them for signed - /// integer vectors, we have to flip their sign bits first. To flip sign bit - /// for byte/halfword integer vector would become inefficient, but for word - /// integer vector, we can leverage XVNEGSP to make it efficiently. eg: - /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000) - /// => VABSDUW((XVNEGSP a), (XVNEGSP b)) - VABSD, - - /// QVFPERM = This corresponds to the QPX qvfperm instruction. - QVFPERM, - - /// QVGPCI = This corresponds to the QPX qvgpci instruction. - QVGPCI, - - /// QVALIGNI = This corresponds to the QPX qvaligni instruction. - QVALIGNI, - - /// QVESPLATI = This corresponds to the QPX qvesplati instruction. - QVESPLATI, - - /// QBFLT = Access the underlying QPX floating-point boolean - /// representation. - QBFLT, - - /// FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or - /// lower (IDX=1) half of v4f32 to v2f64. - FP_EXTEND_HALF, - - /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a - /// byte-swapping store instruction. It byte-swaps the low "Type" bits of - /// the GPRC input, then stores it through Ptr. Type can be either i16 or - /// i32. 
- STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE, - - /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a - /// byte-swapping load instruction. It loads "Type" bits, byte swaps it, - /// then puts it in the bottom bits of the GPRC. TYPE can be either i16 - /// or i32. - LBRX, - - /// STFIWX - The STFIWX instruction. The first operand is an input token - /// chain, then an f64 value to store, then an address to store it to. - STFIWX, - - /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point - /// load which sign-extends from a 32-bit integer value into the - /// destination 64-bit register. - LFIWAX, - - /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point - /// load which zero-extends from a 32-bit integer value into the - /// destination 64-bit register. - LFIWZX, - - /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an - /// integer smaller than 64 bits into a VSR. The integer is zero-extended. - /// This can be used for converting loaded integers to floating point. - LXSIZX, - - /// STXSIX - The STXSI[bh]X instruction. The first operand is an input - /// chain, then an f64 value to store, then an address to store it to, - /// followed by a byte-width for the store. - STXSIX, - - /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian. - /// Maps directly to an lxvd2x instruction that will be followed by - /// an xxswapd. - LXVD2X, - - /// VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian. - /// Maps directly to one of lxvd2x/lxvw4x/lxvh8x/lxvb16x depending on - /// the vector type to load vector in big-endian element order. - LOAD_VEC_BE, - - /// VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a - /// v2f32 value into the lower half of a VSR register. - LD_VSX_LH, - - /// VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory - /// instructions such as LXVDSX, LXVWSX. - LD_SPLAT, - - /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian. - /// Maps directly to an stxvd2x instruction that will be preceded by - /// an xxswapd. - STXVD2X, - - /// CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian. - /// Maps directly to one of stxvd2x/stxvw4x/stxvh8x/stxvb16x depending on - /// the vector type to store vector in big-endian element order. - STORE_VEC_BE, - - /// Store scalar integers from VSR. - ST_VSR_SCAL_INT, - - /// QBRC, CHAIN = QVLFSb CHAIN, Ptr - /// The 4xf32 load used for v4i1 constants. - QVLFSb, - - /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes - /// except they ensure that the compare input is zero-extended for - /// sub-word versions because the atomic loads zero-extend. - ATOMIC_CMP_SWAP_8, ATOMIC_CMP_SWAP_16, - - /// GPRC = TOC_ENTRY GA, TOC - /// Loads the entry for GA from the TOC, where the TOC base is given by - /// the last operand. - TOC_ENTRY - }; + enum NodeType : unsigned { + // Start the numbering where the builtin ops and target ops leave off. + FIRST_NUMBER = ISD::BUILTIN_OP_END, + + /// FSEL - Traditional three-operand fsel node. + /// + FSEL, + + /// XSMAXCDP, XSMINCDP - C-type min/max instructions. + XSMAXCDP, + XSMINCDP, + + /// FCFID - The FCFID instruction, taking an f64 operand and producing + /// and f64 value containing the FP representation of the integer that + /// was temporarily in the f64 operand. + FCFID, + + /// Newer FCFID[US] integer-to-floating-point conversion instructions for + /// unsigned integers and single-precision outputs. 
+ FCFIDU, + FCFIDS, + FCFIDUS, + + /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 + /// operand, producing an f64 value containing the integer representation + /// of that FP value. + FCTIDZ, + FCTIWZ, + + /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for + /// unsigned integers with round toward zero. + FCTIDUZ, + FCTIWUZ, + + /// Floating-point-to-interger conversion instructions + FP_TO_UINT_IN_VSR, + FP_TO_SINT_IN_VSR, + + /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in + /// VSFRC that is sign-extended from ByteWidth to a 64-byte integer. + VEXTS, + + /// SExtVElems, takes an input vector of a smaller type and sign + /// extends to an output vector of a larger type. + SExtVElems, + + /// Reciprocal estimate instructions (unary FP ops). + FRE, + FRSQRTE, + + // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking + // three v4f32 operands and producing a v4f32 result. + VMADDFP, + VNMSUBFP, + + /// VPERM - The PPC VPERM Instruction. + /// + VPERM, + + /// XXSPLT - The PPC VSX splat instructions + /// + XXSPLT, + + /// VECINSERT - The PPC vector insert instruction + /// + VECINSERT, + + /// VECSHL - The PPC vector shift left instruction + /// + VECSHL, + + /// XXPERMDI - The PPC XXPERMDI instruction + /// + XXPERMDI, + + /// The CMPB instruction (takes two operands of i32 or i64). + CMPB, + + /// Hi/Lo - These represent the high and low 16-bit parts of a global + /// address respectively. These nodes have two operands, the first of + /// which must be a TargetGlobalAddress, and the second of which must be a + /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C', + /// though these are usually folded into other nodes. + Hi, + Lo, + + /// The following two target-specific nodes are used for calls through + /// function pointers in the 64-bit SVR4 ABI. + + /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX) + /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to + /// compute an allocation on the stack. + DYNALLOC, + + /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to + /// compute an offset from native SP to the address of the most recent + /// dynamic alloca. + DYNAREAOFFSET, + + /// GlobalBaseReg - On Darwin, this node represents the result of the mflr + /// at function entry, used for PIC code. + GlobalBaseReg, + + /// These nodes represent PPC shifts. + /// + /// For scalar types, only the last `n + 1` bits of the shift amounts + /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc. + /// for exact behaviors. + /// + /// For vector types, only the last n bits are used. See vsld. + SRL, + SRA, + SHL, + + /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign + /// word and shift left immediate. + EXTSWSLI, + + /// The combination of sra[wd]i and addze used to implemented signed + /// integer division by a power of 2. The first operand is the dividend, + /// and the second is the constant shift amount (representing the + /// divisor). + SRA_ADDZE, + + /// CALL - A direct function call. + /// CALL_NOP is a call with the special NOP which follows 64-bit + /// SVR4 calls and 32-bit/64-bit AIX calls. + CALL, + CALL_NOP, + + /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a + /// MTCTR instruction. + MTCTR, + + /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a + /// BCTRL instruction. 
+ BCTRL, + + /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl + /// instruction and the TOC reload required on 64-bit ELF, 32-bit AIX + /// and 64-bit AIX. + BCTRL_LOAD_TOC, + + /// Return with a flag operand, matched by 'blr' + RET_FLAG, + + /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction. + /// This copies the bits corresponding to the specified CRREG into the + /// resultant GPR. Bits corresponding to other CR regs are undefined. + MFOCRF, + + /// Direct move from a VSX register to a GPR + MFVSR, + + /// Direct move from a GPR to a VSX register (algebraic) + MTVSRA, + + /// Direct move from a GPR to a VSX register (zero) + MTVSRZ, + + /// Direct move of 2 consecutive GPR to a VSX register. + BUILD_FP128, + + /// BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and + /// EXTRACT_ELEMENT but take f64 arguments instead of i64, as i64 is + /// unsupported for this target. + /// Merge 2 GPRs to a single SPE register. + BUILD_SPE64, + + /// Extract SPE register component, second argument is high or low. + EXTRACT_SPE, + + /// Extract a subvector from signed integer vector and convert to FP. + /// It is primarily used to convert a (widened) illegal integer vector + /// type to a legal floating point vector type. + /// For example v2i32 -> widened to v4i32 -> v2f64 + SINT_VEC_TO_FP, + + /// Extract a subvector from unsigned integer vector and convert to FP. + /// As with SINT_VEC_TO_FP, used for converting illegal types. + UINT_VEC_TO_FP, + + // FIXME: Remove these once the ANDI glue bug is fixed: + /// i1 = ANDI_rec_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the + /// eq or gt bit of CR0 after executing andi. x, 1. This is used to + /// implement truncation of i32 or i64 to i1. + ANDI_rec_1_EQ_BIT, + ANDI_rec_1_GT_BIT, + + // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit + // target (returns (Lo, Hi)). It takes a chain operand. + READ_TIME_BASE, + + // EH_SJLJ_SETJMP - SjLj exception handling setjmp. + EH_SJLJ_SETJMP, + + // EH_SJLJ_LONGJMP - SjLj exception handling longjmp. + EH_SJLJ_LONGJMP, + + /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* + /// instructions. For lack of better number, we use the opcode number + /// encoding for the OPC field to identify the compare. For example, 838 + /// is VCMPGTSH. + VCMP, + + /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the + /// altivec VCMP*o instructions. For lack of better number, we use the + /// opcode number encoding for the OPC field to identify the compare. For + /// example, 838 is VCMPGTSH. + VCMPo, + + /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This + /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the + /// condition register to branch on, OPC is the branch opcode to use (e.g. + /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is + /// an optional input flag argument. + COND_BRANCH, + + /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based + /// loops. + BDNZ, + BDZ, + + /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding + /// towards zero. Used only as part of the long double-to-int + /// conversion sequence. + FADDRTZ, + + /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register. + MFFS, + + /// TC_RETURN - A tail call return. 
+ /// operand #0 chain
+ /// operand #1 callee (register or absolute)
+ /// operand #2 stack adjustment
+ /// operand #3 optional in flag
+ TC_RETURN,
+
+ /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
+ CR6SET,
+ CR6UNSET,
+
+ /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
+ /// for non-position independent code on PPC32.
+ PPC32_GOT,
+
+ /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
+ /// local dynamic TLS and position independent code on PPC32.
+ PPC32_PICGOT,
+
+ /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
+ /// TLS model, produces an ADDIS8 instruction that adds the GOT
+ /// base to sym\@got\@tprel\@ha.
+ ADDIS_GOT_TPREL_HA,
+
+ /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
+ /// TLS model, produces a LD instruction with base register G8RReg
+ /// and offset sym\@got\@tprel\@l. This completes the addition that
+ /// finds the offset of "sym" relative to the thread pointer.
+ LD_GOT_TPREL_L,
+
+ /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
+ /// model, produces an ADD instruction that adds the contents of
+ /// G8RReg to the thread pointer. Symbol contains a relocation
+ /// sym\@tls which is to be replaced by the thread pointer and
+ /// identifies to the linker that the instruction is part of a
+ /// TLS sequence.
+ ADD_TLS,
+
+ /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
+ /// model, produces an ADDIS8 instruction that adds the GOT base
+ /// register to sym\@got\@tlsgd\@ha.
+ ADDIS_TLSGD_HA,
+
+ /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
+ /// model, produces an ADDI8 instruction that adds G8RReg to
+ /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
+ /// ADDIS_TLSGD_L_ADDR until after register assignment.
+ ADDI_TLSGD_L,
+
+ /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
+ /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
+ /// ADDIS_TLSGD_L_ADDR until after register assignment.
+ GET_TLS_ADDR,
+
+ /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
+ /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
+ /// register assignment.
+ ADDI_TLSGD_L_ADDR,
+
+ /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
+ /// model, produces an ADDIS8 instruction that adds the GOT base
+ /// register to sym\@got\@tlsld\@ha.
+ ADDIS_TLSLD_HA,
+
+ /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
+ /// model, produces an ADDI8 instruction that adds G8RReg to
+ /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
+ /// ADDIS_TLSLD_L_ADDR until after register assignment.
+ ADDI_TLSLD_L,
+
+ /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
+ /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
+ /// ADDIS_TLSLD_L_ADDR until after register assignment.
+ GET_TLSLD_ADDR,
+
+ /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
+ /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
+ /// following register assignment.
+ ADDI_TLSLD_L_ADDR,
+
+ /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
+ /// model, produces an ADDIS8 instruction that adds X3 to
+ /// sym\@dtprel\@ha.
+ ADDIS_DTPREL_HA,
+
+ /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
+ /// model, produces an ADDI8 instruction that adds G8RReg to
+ /// sym\@got\@dtprel\@l.
+ ADDI_DTPREL_L,
+
+ /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
+ /// during instruction selection to optimize a BUILD_VECTOR into
+ /// operations on splats. This is necessary to avoid losing these
+ /// optimizations due to constant folding.
+ VADD_SPLAT,
+
+ /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
+ /// operand identifies the operating system entry point.
+ SC,
+
+ /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
+ CLRBHRB,
+
+ /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
+ /// history rolling buffer entry.
+ MFBHRBE,
+
+ /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
+ RFEBB,
+
+ /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
+ /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
+ /// or stxvd2x instruction. The chain is necessary because the
+ /// sequence replaces a load and needs to provide the same number
+ /// of outputs.
+ XXSWAPD,
+
+ /// An SDNode for swaps that are not associated with any loads/stores
+ /// and thereby have no chain.
+ SWAP_NO_CHAIN,
+
+ /// An SDNode for Power9 vector absolute value difference.
+ /// operand #0 vector
+ /// operand #1 vector
+ /// operand #2 constant i32 0 or 1, to indicate whether it needs to patch
+ /// the most significant bit for signed i32
+ ///
+ /// Power9 VABSD* instructions are designed to support unsigned integer
+ /// vectors (byte/halfword/word); if we want to make use of them for signed
+ /// integer vectors, we have to flip their sign bits first. Flipping the sign
+ /// bit of a byte/halfword integer vector would be inefficient, but for a word
+ /// integer vector we can leverage XVNEGSP to do it efficiently, e.g.:
+ /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
+ ///              => VABSDUW((XVNEGSP a), (XVNEGSP b))
+ VABSD,
+
+ /// QVFPERM = This corresponds to the QPX qvfperm instruction.
+ QVFPERM,
+
+ /// QVGPCI = This corresponds to the QPX qvgpci instruction.
+ QVGPCI,
+
+ /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
+ QVALIGNI,
+
+ /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
+ QVESPLATI,
+
+ /// QBFLT = Access the underlying QPX floating-point boolean
+ /// representation.
+ QBFLT,
+
+ /// FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or
+ /// lower (IDX=1) half of v4f32 to v2f64.
+ FP_EXTEND_HALF,
+
+ /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
+ /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
+ /// the GPRC input, then stores it through Ptr. Type can be either i16 or
+ /// i32.
+ STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,
+
+ /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
+ /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
+ /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
+ /// or i32.
+ LBRX,
+
+ /// STFIWX - The STFIWX instruction. The first operand is an input token
+ /// chain, then an f64 value to store, then an address to store it to.
+ STFIWX,
+
+ /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
+ /// load which sign-extends from a 32-bit integer value into the
+ /// destination 64-bit register.
+ LFIWAX,
+
+ /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
+ /// load which zero-extends from a 32-bit integer value into the
+ /// destination 64-bit register.
+ LFIWZX,
+
+ /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
+ /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
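(As an aside on the VABSD description above: a minimal scalar sketch in plain C++, not part of this patch, of why biasing both inputs by 0x80000000 lets the unsigned absolute-difference instructions compute a signed absolute difference. Adding 0x80000000 modulo 2^32 is the same as flipping the sign bit, which is what xvnegsp provides; the helper name absdu is illustrative only.)

#include <cassert>
#include <cstdint>

// Unsigned absolute difference, the per-element operation of VABSDUW.
static uint32_t absdu(uint32_t A, uint32_t B) { return A > B ? A - B : B - A; }

int main() {
  int32_t A = -5, B = 7;
  // Bias both inputs by 0x80000000 (equivalently, XOR the sign bit), which
  // maps signed order onto unsigned order.
  uint32_t BiasedA = static_cast<uint32_t>(A) + 0x80000000u;
  uint32_t BiasedB = static_cast<uint32_t>(B) + 0x80000000u;
  // The unsigned absolute difference of the biased values is |A - B| = 12.
  assert(absdu(BiasedA, BiasedB) == 12u);
  return 0;
}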
+ /// This can be used for converting loaded integers to floating point. + LXSIZX, + + /// STXSIX - The STXSI[bh]X instruction. The first operand is an input + /// chain, then an f64 value to store, then an address to store it to, + /// followed by a byte-width for the store. + STXSIX, + + /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian. + /// Maps directly to an lxvd2x instruction that will be followed by + /// an xxswapd. + LXVD2X, + + /// VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian. + /// Maps directly to one of lxvd2x/lxvw4x/lxvh8x/lxvb16x depending on + /// the vector type to load vector in big-endian element order. + LOAD_VEC_BE, + + /// VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a + /// v2f32 value into the lower half of a VSR register. + LD_VSX_LH, + + /// VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory + /// instructions such as LXVDSX, LXVWSX. + LD_SPLAT, + + /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian. + /// Maps directly to an stxvd2x instruction that will be preceded by + /// an xxswapd. + STXVD2X, + + /// CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian. + /// Maps directly to one of stxvd2x/stxvw4x/stxvh8x/stxvb16x depending on + /// the vector type to store vector in big-endian element order. + STORE_VEC_BE, + + /// Store scalar integers from VSR. + ST_VSR_SCAL_INT, + + /// QBRC, CHAIN = QVLFSb CHAIN, Ptr + /// The 4xf32 load used for v4i1 constants. + QVLFSb, + + /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes + /// except they ensure that the compare input is zero-extended for + /// sub-word versions because the atomic loads zero-extend. + ATOMIC_CMP_SWAP_8, + ATOMIC_CMP_SWAP_16, + + /// GPRC = TOC_ENTRY GA, TOC + /// Loads the entry for GA from the TOC, where the TOC base is given by + /// the last operand. + TOC_ENTRY + }; } // end namespace PPCISD diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td index b8cb7b1fe7f7..6dedb874362d 100644 --- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td @@ -253,7 +253,7 @@ def LDARX : XForm_1_memOp<31, 84, (outs g8rc:$rD), (ins memrr:$ptr), // Instruction to support lock versions of atomics // (EH=1 - see Power ISA 2.07 Book II 4.4.2) def LDARXL : XForm_1<31, 84, (outs g8rc:$rD), (ins memrr:$ptr), - "ldarx $rD, $ptr, 1", IIC_LdStLDARX, []>, isDOT; + "ldarx $rD, $ptr, 1", IIC_LdStLDARX, []>, isRecordForm; let hasExtraDefRegAllocReq = 1 in def LDAT : X_RD5_RS5_IM5<31, 614, (outs g8rc:$rD), (ins g8rc:$rA, u5imm:$FC), @@ -263,7 +263,7 @@ def LDAT : X_RD5_RS5_IM5<31, 614, (outs g8rc:$rD), (ins g8rc:$rA, u5imm:$FC), let Defs = [CR0], mayStore = 1, mayLoad = 0, hasSideEffects = 0 in def STDCX : XForm_1_memOp<31, 214, (outs), (ins g8rc:$rS, memrr:$dst), - "stdcx. $rS, $dst", IIC_LdStSTDCX, []>, isDOT; + "stdcx. $rS, $dst", IIC_LdStSTDCX, []>, isRecordForm; let mayStore = 1, mayLoad = 0, hasSideEffects = 0 in def STDAT : X_RD5_RS5_IM5<31, 742, (outs), (ins g8rc:$rS, g8rc:$rA, u5imm:$FC), @@ -476,14 +476,14 @@ defm XOR8 : XForm_6r<31, 316, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB), // Logical ops with immediate. let Defs = [CR0] in { -def ANDI8o : DForm_4<28, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2), +def ANDI8_rec : DForm_4<28, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2), "andi. 
$dst, $src1, $src2", IIC_IntGeneral, [(set i64:$dst, (and i64:$src1, immZExt16:$src2))]>, - isDOT; -def ANDIS8o : DForm_4<29, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2), + isRecordForm; +def ANDIS8_rec : DForm_4<29, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2), "andis. $dst, $src1, $src2", IIC_IntGeneral, [(set i64:$dst, (and i64:$src1, imm16ShiftedZExt:$src2))]>, - isDOT; + isRecordForm; } def ORI8 : DForm_4<24, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2), "ori $dst, $src1, $src2", IIC_IntSimple, @@ -1461,7 +1461,7 @@ class X_L1_RA5_RB5 opcode, bits<10> xo, string opc, RegisterOperand ty, let Interpretation64Bit = 1, isCodeGenOnly = 1 in { def CP_COPY8 : X_L1_RA5_RB5<31, 774, "copy" , g8rc, IIC_LdStCOPY, []>; def CP_PASTE8 : X_L1_RA5_RB5<31, 902, "paste" , g8rc, IIC_LdStPASTE, []>; -def CP_PASTE8o : X_L1_RA5_RB5<31, 902, "paste.", g8rc, IIC_LdStPASTE, []>,isDOT; +def CP_PASTE8_rec : X_L1_RA5_RB5<31, 902, "paste.", g8rc, IIC_LdStPASTE, []>,isRecordForm; } // SLB Invalidate Entry Global diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td index 1cdb49261010..f94816a35f79 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td @@ -794,37 +794,37 @@ class VCMPo xo, string asmstr, ValueType Ty> // f32 element comparisons.0 def VCMPBFP : VCMP <966, "vcmpbfp $vD, $vA, $vB" , v4f32>; -def VCMPBFPo : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>; +def VCMPBFP_rec : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>; def VCMPEQFP : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>; -def VCMPEQFPo : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>; +def VCMPEQFP_rec : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>; def VCMPGEFP : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>; -def VCMPGEFPo : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>; +def VCMPGEFP_rec : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>; def VCMPGTFP : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>; -def VCMPGTFPo : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>; +def VCMPGTFP_rec : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>; // i8 element comparisons. def VCMPEQUB : VCMP < 6, "vcmpequb $vD, $vA, $vB" , v16i8>; -def VCMPEQUBo : VCMPo< 6, "vcmpequb. $vD, $vA, $vB", v16i8>; +def VCMPEQUB_rec : VCMPo< 6, "vcmpequb. $vD, $vA, $vB", v16i8>; def VCMPGTSB : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>; -def VCMPGTSBo : VCMPo<774, "vcmpgtsb. $vD, $vA, $vB", v16i8>; +def VCMPGTSB_rec : VCMPo<774, "vcmpgtsb. $vD, $vA, $vB", v16i8>; def VCMPGTUB : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>; -def VCMPGTUBo : VCMPo<518, "vcmpgtub. $vD, $vA, $vB", v16i8>; +def VCMPGTUB_rec : VCMPo<518, "vcmpgtub. $vD, $vA, $vB", v16i8>; // i16 element comparisons. def VCMPEQUH : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>; -def VCMPEQUHo : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>; +def VCMPEQUH_rec : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>; def VCMPGTSH : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>; -def VCMPGTSHo : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>; +def VCMPGTSH_rec : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>; def VCMPGTUH : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>; -def VCMPGTUHo : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>; +def VCMPGTUH_rec : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>; // i32 element comparisons. def VCMPEQUW : VCMP <134, "vcmpequw $vD, $vA, $vB" , v4i32>; -def VCMPEQUWo : VCMPo<134, "vcmpequw. $vD, $vA, $vB", v4i32>; +def VCMPEQUW_rec : VCMPo<134, "vcmpequw. 
$vD, $vA, $vB", v4i32>; def VCMPGTSW : VCMP <902, "vcmpgtsw $vD, $vA, $vB" , v4i32>; -def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>; +def VCMPGTSW_rec : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>; def VCMPGTUW : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>; -def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>; +def VCMPGTUW_rec : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>; let isCodeGenOnly = 1, isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 in { @@ -1276,11 +1276,11 @@ def VORC : VXForm_1<1348, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), // i64 element comparisons. def VCMPEQUD : VCMP <199, "vcmpequd $vD, $vA, $vB" , v2i64>; -def VCMPEQUDo : VCMPo<199, "vcmpequd. $vD, $vA, $vB", v2i64>; +def VCMPEQUD_rec : VCMPo<199, "vcmpequd. $vD, $vA, $vB", v2i64>; def VCMPGTSD : VCMP <967, "vcmpgtsd $vD, $vA, $vB" , v2i64>; -def VCMPGTSDo : VCMPo<967, "vcmpgtsd. $vD, $vA, $vB", v2i64>; +def VCMPGTSD_rec : VCMPo<967, "vcmpgtsd. $vD, $vA, $vB", v2i64>; def VCMPGTUD : VCMP <711, "vcmpgtud $vD, $vA, $vB" , v2i64>; -def VCMPGTUDo : VCMPo<711, "vcmpgtud. $vD, $vA, $vB", v2i64>; +def VCMPGTUD_rec : VCMPo<711, "vcmpgtud. $vD, $vA, $vB", v2i64>; // The cryptography instructions that do not require Category:Vector.Crypto def VPMSUMB : VX1_Int_Ty<1032, "vpmsumb", @@ -1344,21 +1344,21 @@ let Predicates = [HasP9Altivec] in { // i8 element comparisons. def VCMPNEB : VCMP < 7, "vcmpneb $vD, $vA, $vB" , v16i8>; -def VCMPNEBo : VCMPo < 7, "vcmpneb. $vD, $vA, $vB" , v16i8>; +def VCMPNEB_rec : VCMPo < 7, "vcmpneb. $vD, $vA, $vB" , v16i8>; def VCMPNEZB : VCMP <263, "vcmpnezb $vD, $vA, $vB" , v16i8>; -def VCMPNEZBo : VCMPo<263, "vcmpnezb. $vD, $vA, $vB", v16i8>; +def VCMPNEZB_rec : VCMPo<263, "vcmpnezb. $vD, $vA, $vB", v16i8>; // i16 element comparisons. def VCMPNEH : VCMP < 71, "vcmpneh $vD, $vA, $vB" , v8i16>; -def VCMPNEHo : VCMPo< 71, "vcmpneh. $vD, $vA, $vB" , v8i16>; +def VCMPNEH_rec : VCMPo< 71, "vcmpneh. $vD, $vA, $vB" , v8i16>; def VCMPNEZH : VCMP <327, "vcmpnezh $vD, $vA, $vB" , v8i16>; -def VCMPNEZHo : VCMPo<327, "vcmpnezh. $vD, $vA, $vB", v8i16>; +def VCMPNEZH_rec : VCMPo<327, "vcmpnezh. $vD, $vA, $vB", v8i16>; // i32 element comparisons. def VCMPNEW : VCMP <135, "vcmpnew $vD, $vA, $vB" , v4i32>; -def VCMPNEWo : VCMPo<135, "vcmpnew. $vD, $vA, $vB" , v4i32>; +def VCMPNEW_rec : VCMPo<135, "vcmpnew. $vD, $vA, $vB" , v4i32>; def VCMPNEZW : VCMP <391, "vcmpnezw $vD, $vA, $vB" , v4i32>; -def VCMPNEZWo : VCMPo<391, "vcmpnezw. $vD, $vA, $vB", v4i32>; +def VCMPNEZW_rec : VCMPo<391, "vcmpnezw. $vD, $vA, $vB", v4i32>; // VX-Form: [PO VRT / UIM VRB XO]. // We use VXForm_1 to implement it, that is, we use "VRA" (5 bit) to represent @@ -1535,18 +1535,18 @@ class VX_VT5_EO5_VB5_XO9_o eo, bits<9> xo, string opc, } // Decimal Convert From/to National/Zoned/Signed-QWord -def BCDCFNo : VX_VT5_EO5_VB5_PS1_XO9_o<7, 385, "bcdcfn." , []>; -def BCDCFZo : VX_VT5_EO5_VB5_PS1_XO9_o<6, 385, "bcdcfz." , []>; -def BCDCTNo : VX_VT5_EO5_VB5_XO9_o <5, 385, "bcdctn." , []>; -def BCDCTZo : VX_VT5_EO5_VB5_PS1_XO9_o<4, 385, "bcdctz." , []>; -def BCDCFSQo : VX_VT5_EO5_VB5_PS1_XO9_o<2, 385, "bcdcfsq.", []>; -def BCDCTSQo : VX_VT5_EO5_VB5_XO9_o <0, 385, "bcdctsq.", []>; +def BCDCFN_rec : VX_VT5_EO5_VB5_PS1_XO9_o<7, 385, "bcdcfn." , []>; +def BCDCFZ_rec : VX_VT5_EO5_VB5_PS1_XO9_o<6, 385, "bcdcfz." , []>; +def BCDCTN_rec : VX_VT5_EO5_VB5_XO9_o <5, 385, "bcdctn." , []>; +def BCDCTZ_rec : VX_VT5_EO5_VB5_PS1_XO9_o<4, 385, "bcdctz." 
, []>; +def BCDCFSQ_rec : VX_VT5_EO5_VB5_PS1_XO9_o<2, 385, "bcdcfsq.", []>; +def BCDCTSQ_rec : VX_VT5_EO5_VB5_XO9_o <0, 385, "bcdctsq.", []>; // Decimal Copy-Sign/Set-Sign let Defs = [CR6] in -def BCDCPSGNo : VX1_VT5_VA5_VB5<833, "bcdcpsgn.", []>; +def BCDCPSGN_rec : VX1_VT5_VA5_VB5<833, "bcdcpsgn.", []>; -def BCDSETSGNo : VX_VT5_EO5_VB5_PS1_XO9_o<31, 385, "bcdsetsgn.", []>; +def BCDSETSGN_rec : VX_VT5_EO5_VB5_PS1_XO9_o<31, 385, "bcdsetsgn.", []>; // [PO VRT VRA VRB 1 PS XO], "_o" means CR6 is set. class VX_VT5_VA5_VB5_PS1_XO9_o xo, string opc, list pattern> @@ -1565,13 +1565,13 @@ class VX_VT5_VA5_VB5_XO9_o xo, string opc, list pattern> } // Decimal Shift/Unsigned-Shift/Shift-and-Round -def BCDSo : VX_VT5_VA5_VB5_PS1_XO9_o<193, "bcds." , []>; -def BCDUSo : VX_VT5_VA5_VB5_XO9_o <129, "bcdus.", []>; -def BCDSRo : VX_VT5_VA5_VB5_PS1_XO9_o<449, "bcdsr.", []>; +def BCDS_rec : VX_VT5_VA5_VB5_PS1_XO9_o<193, "bcds." , []>; +def BCDUS_rec : VX_VT5_VA5_VB5_XO9_o <129, "bcdus.", []>; +def BCDSR_rec : VX_VT5_VA5_VB5_PS1_XO9_o<449, "bcdsr.", []>; // Decimal (Unsigned) Truncate -def BCDTRUNCo : VX_VT5_VA5_VB5_PS1_XO9_o<257, "bcdtrunc." , []>; -def BCDUTRUNCo : VX_VT5_VA5_VB5_XO9_o <321, "bcdutrunc.", []>; +def BCDTRUNC_rec : VX_VT5_VA5_VB5_PS1_XO9_o<257, "bcdtrunc." , []>; +def BCDUTRUNC_rec : VX_VT5_VA5_VB5_XO9_o <321, "bcdutrunc.", []>; // Absolute Difference def VABSDUB : VXForm_1<1027, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), diff --git a/llvm/lib/Target/PowerPC/PPCInstrFormats.td b/llvm/lib/Target/PowerPC/PPCInstrFormats.td index ab61f73207d2..115bd44ea202 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrFormats.td +++ b/llvm/lib/Target/PowerPC/PPCInstrFormats.td @@ -262,8 +262,8 @@ class DForm_2 opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> : DForm_base { - // Even though ADDICo does not really have an RC bit, provide - // the declaration of one here so that isDOT has something to set. + // Even though ADDIC_rec does not really have an RC bit, provide + // the declaration of one here so that isRecordForm has something to set. 
bit RC = 0; } @@ -428,7 +428,7 @@ class XForm_base_r3xo opcode, bits<10> xo, dag OOL, dag IOL, string asms let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = RST; let Inst{11-15} = A; @@ -463,7 +463,7 @@ class XForm_base_r3xo_swapped bits<5> RST; bits<5> B; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = RST; let Inst{11-15} = A; @@ -744,7 +744,7 @@ class XForm_42 opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, : XForm_base_r3xo { let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = RST; let Inst{11-20} = 0; @@ -757,7 +757,7 @@ class XForm_43 opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; bits<5> FM; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = FM; let Inst{11-20} = 0; @@ -902,7 +902,7 @@ class XForm_htm2 opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, : I { bit L; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{7-9} = 0; let Inst{10} = L; @@ -1265,7 +1265,7 @@ class XX3Form_Rc opcode, bits<7> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = XT{4-0}; let Inst{11-15} = XA{4-0}; @@ -1651,7 +1651,7 @@ class XFLForm opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, bits<8> FM; bits<5> rT; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Pattern = pattern; let Inst{6} = 0; @@ -1670,7 +1670,7 @@ class XFLForm_1 opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, bit W; bits<5> FRB; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Pattern = pattern; let Inst{6} = L; @@ -1689,7 +1689,7 @@ class XSForm_1 opcode, bits<9> xo, dag OOL, dag IOL, string asmstr, bits<5> RS; bits<6> SH; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Pattern = pattern; let Inst{6-10} = RS; @@ -1710,7 +1710,7 @@ class XOForm_1 opcode, bits<9> xo, bit oe, dag OOL, dag IOL, string asms let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = RT; let Inst{11-15} = RA; @@ -1737,7 +1737,7 @@ class AForm_1 opcode, bits<5> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = FRT; let Inst{11-15} = FRA; @@ -1797,7 +1797,7 @@ class MForm_1 opcode, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = RS; let Inst{11-15} = RA; @@ -1823,7 +1823,7 @@ class MDForm_1 opcode, bits<3> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = RS; let Inst{11-15} = RA; @@ -1844,7 +1844,7 @@ class MDSForm_1 opcode, bits<4> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = RS; let Inst{11-15} = RA; @@ -2106,7 +2106,7 @@ class Z23Form_1 opcode, bits<8> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = FRT; let Inst{11-15} = FRA; @@ -2130,7 +2130,7 @@ class Z23Form_3 opcode, bits<8> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = FRT; let 
Inst{11-22} = idx; @@ -2148,7 +2148,7 @@ class Z23Form_8 opcode, bits<8> xo, dag OOL, dag IOL, string asmstr, let Pattern = pattern; - bit RC = 0; // set by isDOT + bit RC = 0; // set by isRecordForm let Inst{6-10} = VRT; let Inst{11-14} = 0; diff --git a/llvm/lib/Target/PowerPC/PPCInstrHTM.td b/llvm/lib/Target/PowerPC/PPCInstrHTM.td index 104b57a70a2e..6cbf999ca73d 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrHTM.td +++ b/llvm/lib/Target/PowerPC/PPCInstrHTM.td @@ -36,7 +36,7 @@ def TEND : XForm_htm1 <31, 686, def TABORT : XForm_base_r3xo <31, 910, (outs), (ins gprc:$A), "tabort. $A", IIC_SprMTSPR, - []>, isDOT { + []>, isRecordForm { let RST = 0; let B = 0; } @@ -44,38 +44,38 @@ def TABORT : XForm_base_r3xo <31, 910, def TABORTWC : XForm_base_r3xo <31, 782, (outs), (ins u5imm:$RTS, gprc:$A, gprc:$B), "tabortwc. $RTS, $A, $B", IIC_SprMTSPR, []>, - isDOT; + isRecordForm; def TABORTWCI : XForm_base_r3xo <31, 846, (outs), (ins u5imm:$RTS, gprc:$A, u5imm:$B), "tabortwci. $RTS, $A, $B", IIC_SprMTSPR, []>, - isDOT; + isRecordForm; def TABORTDC : XForm_base_r3xo <31, 814, (outs), (ins u5imm:$RTS, gprc:$A, gprc:$B), "tabortdc. $RTS, $A, $B", IIC_SprMTSPR, []>, - isDOT; + isRecordForm; def TABORTDCI : XForm_base_r3xo <31, 878, (outs), (ins u5imm:$RTS, gprc:$A, u5imm:$B), "tabortdci. $RTS, $A, $B", IIC_SprMTSPR, []>, - isDOT; + isRecordForm; def TSR : XForm_htm2 <31, 750, (outs), (ins u1imm:$L), "tsr. $L", IIC_SprMTSPR, []>, - isDOT; + isRecordForm; def TRECLAIM : XForm_base_r3xo <31, 942, (outs), (ins gprc:$A), "treclaim. $A", IIC_SprMTSPR, []>, - isDOT { + isRecordForm { let RST = 0; let B = 0; } def TRECHKPT : XForm_base_r3xo <31, 1006, (outs), (ins), "trechkpt.", IIC_SprMTSPR, []>, - isDOT { + isRecordForm { let RST = 0; let A = 0; let B = 0; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp index 00fe43fc8b9c..e97056b2385a 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -371,7 +371,7 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, MachineFunction &MF = *MI.getParent()->getParent(); // Normal instructions can be commuted the obvious way. 
- if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMIo) + if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMI_rec) return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because @@ -391,7 +391,7 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, // Swap op1/op2 assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) && - "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo."); + "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMI_rec."); Register Reg0 = MI.getOperand(0).getReg(); Register Reg1 = MI.getOperand(1).getReg(); Register Reg2 = MI.getOperand(2).getReg(); @@ -1836,8 +1836,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, int NewOpC = -1; int MIOpC = MI->getOpcode(); - if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDI8o || - MIOpC == PPC::ANDISo || MIOpC == PPC::ANDIS8o) + if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec || + MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec) NewOpC = MIOpC; else { NewOpC = PPC::getRecordFormOpcode(MIOpC); @@ -1943,9 +1943,9 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1); // The mask value needs to shift right 16 if we're emitting andis. Mask >>= MBInLoHWord ? 0 : 16; - NewOpC = MIOpC == PPC::RLWINM ? - (MBInLoHWord ? PPC::ANDIo : PPC::ANDISo) : - (MBInLoHWord ? PPC::ANDI8o :PPC::ANDIS8o); + NewOpC = MIOpC == PPC::RLWINM + ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec) + : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec); } else if (MRI->use_empty(GPRRes) && (ME == 31) && (ME - MB + 1 == SH) && (MB >= 16)) { // If we are rotating by the exact number of bits as are in the mask @@ -1953,7 +1953,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, // that's just an andis. (as long as the GPR result has no uses). Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1); Mask >>= 16; - NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDISo :PPC::ANDIS8o; + NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec; } // If we've set the mask, we can transform. if (Mask != ~0LLU) { @@ -1966,7 +1966,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, int64_t MB = MI->getOperand(3).getImm(); if (MB >= 48) { uint64_t Mask = (1LLU << (63 - MB + 1)) - 1; - NewOpC = PPC::ANDI8o; + NewOpC = PPC::ANDI8_rec; MI->RemoveOperand(3); MI->getOperand(2).setImm(Mask); NumRcRotatesConvertedToRcAnd++; @@ -2306,7 +2306,7 @@ void PPCInstrInfo::replaceInstrWithLI(MachineInstr &MI, // Replace the instruction. if (LII.SetCR) { - MI.setDesc(get(LII.Is64Bit ? PPC::ANDI8o : PPC::ANDIo)); + MI.setDesc(get(LII.Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec)); // Set the immediate. 
MachineInstrBuilder(*MI.getParent()->getParent(), MI) .addImm(LII.Imm).addReg(PPC::CR0, RegState::ImplicitDefine); @@ -2370,15 +2370,13 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI( ImmInstrInfo III; unsigned Opc = MI.getOpcode(); bool ConvertibleImmForm = - Opc == PPC::CMPWI || Opc == PPC::CMPLWI || - Opc == PPC::CMPDI || Opc == PPC::CMPLDI || - Opc == PPC::ADDI || Opc == PPC::ADDI8 || - Opc == PPC::ORI || Opc == PPC::ORI8 || - Opc == PPC::XORI || Opc == PPC::XORI8 || - Opc == PPC::RLDICL || Opc == PPC::RLDICLo || - Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 || - Opc == PPC::RLWINM || Opc == PPC::RLWINMo || - Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o; + Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI || + Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 || + Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI || + Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec || + Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 || + Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 || + Opc == PPC::RLWINM8_rec; bool IsVFReg = (MI.getNumOperands() && MI.getOperand(0).isReg()) ? isVFRegister(MI.getOperand(0).getReg()) : false; @@ -2879,34 +2877,34 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI, return false; } case PPC::RLDICL: - case PPC::RLDICLo: + case PPC::RLDICL_rec: case PPC::RLDICL_32: case PPC::RLDICL_32_64: { // Use APInt's rotate function. int64_t SH = MI.getOperand(2).getImm(); int64_t MB = MI.getOperand(3).getImm(); - APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICLo) ? - 64 : 32, SExtImm, true); + APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32, + SExtImm, true); InVal = InVal.rotl(SH); uint64_t Mask = (1LLU << (63 - MB + 1)) - 1; InVal &= Mask; // Can't replace negative values with an LI as that will sign-extend // and not clear the left bits. If we're setting the CR bit, we will use - // ANDIo which won't sign extend, so that's safe. + // ANDI_rec which won't sign extend, so that's safe. if (isUInt<15>(InVal.getSExtValue()) || - (Opc == PPC::RLDICLo && isUInt<16>(InVal.getSExtValue()))) { + (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) { ReplaceWithLI = true; Is64BitLI = Opc != PPC::RLDICL_32; NewImm = InVal.getSExtValue(); - SetCR = Opc == PPC::RLDICLo; + SetCR = Opc == PPC::RLDICL_rec; break; } return false; } case PPC::RLWINM: case PPC::RLWINM8: - case PPC::RLWINMo: - case PPC::RLWINM8o: { + case PPC::RLWINM_rec: + case PPC::RLWINM8_rec: { int64_t SH = MI.getOperand(2).getImm(); int64_t MB = MI.getOperand(3).getImm(); int64_t ME = MI.getOperand(4).getImm(); @@ -2917,15 +2915,15 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI, InVal &= Mask; // Can't replace negative values with an LI as that will sign-extend // and not clear the left bits. If we're setting the CR bit, we will use - // ANDIo which won't sign extend, so that's safe. + // ANDI_rec which won't sign extend, so that's safe. 
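(For readers unfamiliar with rlwinm, a small standalone model of the 32-bit rotate-and-mask it performs, which is what the APInt-based constant folding above simulates. Plain C++, illustrative only; it assumes the non-wrapping case MB <= ME, and the helper names are not from LLVM.)

#include <cassert>
#include <cstdint>

// Rotate a 32-bit value left by SH bits.
static uint32_t rotl32(uint32_t V, unsigned SH) {
  SH &= 31;
  return SH ? (V << SH) | (V >> (32 - SH)) : V;
}

// PPC MASK(MB, ME) for MB <= ME: bits MB..ME set, with bit 0 being the MSB.
static uint32_t ppcMask(unsigned MB, unsigned ME) {
  return (0xFFFFFFFFu >> MB) & (0xFFFFFFFFu << (31 - ME));
}

// rlwinm rA, rS, SH, MB, ME == (rS rotated left by SH) & MASK(MB, ME).
static uint32_t rlwinm(uint32_t RS, unsigned SH, unsigned MB, unsigned ME) {
  return rotl32(RS, SH) & ppcMask(MB, ME);
}

int main() {
  // clrlwi rA, rS, 16 is rlwinm rA, rS, 0, 16, 31: keep the low halfword.
  assert(rlwinm(0x12345678u, 0, 16, 31) == 0x5678u);
  // slwi rA, rS, 8 is rlwinm rA, rS, 8, 0, 23: shift left by 8.
  assert(rlwinm(0x12345678u, 8, 0, 23) == 0x34567800u);
  return 0;
}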
bool ValueFits = isUInt<15>(InVal.getSExtValue()); - ValueFits |= ((Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o) && + ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) && isUInt<16>(InVal.getSExtValue())); if (ValueFits) { ReplaceWithLI = true; - Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o; + Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec; NewImm = InVal.getSExtValue(); - SetCR = Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o; + SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec; break; } return false; @@ -2987,7 +2985,7 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI, LII.Is64Bit = Is64BitLI; LII.SetCR = SetCR; // If we're setting the CR, the original load-immediate must be kept (as an - // operand to ANDIo/ANDI8o). + // operand to ANDI_rec/ANDI8_rec). if (KilledDef && SetCR) *KilledDef = nullptr; replaceInstrWithLI(MI, LII); @@ -3038,13 +3036,13 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg, III.IsSummingOperands = true; III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8; break; - case PPC::ADDCo: + case PPC::ADDC_rec: III.SignedImm = true; III.ZeroIsSpecialOrig = 0; III.ZeroIsSpecialNew = 0; III.IsCommutative = true; III.IsSummingOperands = true; - III.ImmOpcode = PPC::ADDICo; + III.ImmOpcode = PPC::ADDIC_rec; break; case PPC::SUBFC: case PPC::SUBFC8: @@ -3070,8 +3068,8 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg, III.IsCommutative = false; III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI; break; - case PPC::ANDo: - case PPC::AND8o: + case PPC::AND_rec: + case PPC::AND8_rec: case PPC::OR: case PPC::OR8: case PPC::XOR: @@ -3082,8 +3080,12 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg, III.IsCommutative = true; switch(Opc) { default: llvm_unreachable("Unknown opcode"); - case PPC::ANDo: III.ImmOpcode = PPC::ANDIo; break; - case PPC::AND8o: III.ImmOpcode = PPC::ANDI8o; break; + case PPC::AND_rec: + III.ImmOpcode = PPC::ANDI_rec; + break; + case PPC::AND8_rec: + III.ImmOpcode = PPC::ANDI8_rec; + break; case PPC::OR: III.ImmOpcode = PPC::ORI; break; case PPC::OR8: III.ImmOpcode = PPC::ORI8; break; case PPC::XOR: III.ImmOpcode = PPC::XORI; break; @@ -3092,18 +3094,18 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg, break; case PPC::RLWNM: case PPC::RLWNM8: - case PPC::RLWNMo: - case PPC::RLWNM8o: + case PPC::RLWNM_rec: + case PPC::RLWNM8_rec: case PPC::SLW: case PPC::SLW8: - case PPC::SLWo: - case PPC::SLW8o: + case PPC::SLW_rec: + case PPC::SLW8_rec: case PPC::SRW: case PPC::SRW8: - case PPC::SRWo: - case PPC::SRW8o: + case PPC::SRW_rec: + case PPC::SRW8_rec: case PPC::SRAW: - case PPC::SRAWo: + case PPC::SRAW_rec: III.SignedImm = false; III.ZeroIsSpecialOrig = 0; III.ZeroIsSpecialNew = 0; @@ -3113,8 +3115,8 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg, // This does not apply to shift right algebraic because a value // out of range will produce a -1/0. 
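(The "-1/0" remark above refers to the behaviour of the algebraic right shifts: a shift amount of 32 or more replicates the sign bit instead of being masked away. A tiny C++ model of sraw under that assumption, for illustration only and not taken from the patch.)

#include <cassert>
#include <cstdint>

// Model of sraw: the shift amount is 6 bits wide, and amounts >= 32
// produce all copies of the sign bit (0 for non-negative, -1 for negative).
static int32_t srawModel(int32_t RS, unsigned ShAmt6) {
  ShAmt6 &= 63;
  if (ShAmt6 >= 32)
    return RS < 0 ? -1 : 0;
  return static_cast<int32_t>(static_cast<int64_t>(RS) >> ShAmt6);
}

int main() {
  assert(srawModel(-1024, 40) == -1); // out-of-range amount, negative input
  assert(srawModel(1024, 40) == 0);   // out-of-range amount, non-negative input
  assert(srawModel(-1024, 4) == -64); // in-range amount shifts as usual
  return 0;
}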
III.ImmWidth = 16; - if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || - Opc == PPC::RLWNMo || Opc == PPC::RLWNM8o) + if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec || + Opc == PPC::RLWNM8_rec) III.TruncateImmTo = 5; else III.TruncateImmTo = 6; @@ -3122,38 +3124,50 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg, default: llvm_unreachable("Unknown opcode"); case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break; case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break; - case PPC::RLWNMo: III.ImmOpcode = PPC::RLWINMo; break; - case PPC::RLWNM8o: III.ImmOpcode = PPC::RLWINM8o; break; + case PPC::RLWNM_rec: + III.ImmOpcode = PPC::RLWINM_rec; + break; + case PPC::RLWNM8_rec: + III.ImmOpcode = PPC::RLWINM8_rec; + break; case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break; case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break; - case PPC::SLWo: III.ImmOpcode = PPC::RLWINMo; break; - case PPC::SLW8o: III.ImmOpcode = PPC::RLWINM8o; break; + case PPC::SLW_rec: + III.ImmOpcode = PPC::RLWINM_rec; + break; + case PPC::SLW8_rec: + III.ImmOpcode = PPC::RLWINM8_rec; + break; case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break; case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break; - case PPC::SRWo: III.ImmOpcode = PPC::RLWINMo; break; - case PPC::SRW8o: III.ImmOpcode = PPC::RLWINM8o; break; + case PPC::SRW_rec: + III.ImmOpcode = PPC::RLWINM_rec; + break; + case PPC::SRW8_rec: + III.ImmOpcode = PPC::RLWINM8_rec; + break; case PPC::SRAW: III.ImmWidth = 5; III.TruncateImmTo = 0; III.ImmOpcode = PPC::SRAWI; break; - case PPC::SRAWo: + case PPC::SRAW_rec: III.ImmWidth = 5; III.TruncateImmTo = 0; - III.ImmOpcode = PPC::SRAWIo; + III.ImmOpcode = PPC::SRAWI_rec; break; } break; case PPC::RLDCL: - case PPC::RLDCLo: + case PPC::RLDCL_rec: case PPC::RLDCR: - case PPC::RLDCRo: + case PPC::RLDCR_rec: case PPC::SLD: - case PPC::SLDo: + case PPC::SLD_rec: case PPC::SRD: - case PPC::SRDo: + case PPC::SRD_rec: case PPC::SRAD: - case PPC::SRADo: + case PPC::SRAD_rec: III.SignedImm = false; III.ZeroIsSpecialOrig = 0; III.ZeroIsSpecialNew = 0; @@ -3163,30 +3177,38 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg, // This does not apply to shift right algebraic because a value // out of range will produce a -1/0. 
III.ImmWidth = 16; - if (Opc == PPC::RLDCL || Opc == PPC::RLDCLo || - Opc == PPC::RLDCR || Opc == PPC::RLDCRo) + if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR || + Opc == PPC::RLDCR_rec) III.TruncateImmTo = 6; else III.TruncateImmTo = 7; switch(Opc) { default: llvm_unreachable("Unknown opcode"); case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break; - case PPC::RLDCLo: III.ImmOpcode = PPC::RLDICLo; break; + case PPC::RLDCL_rec: + III.ImmOpcode = PPC::RLDICL_rec; + break; case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break; - case PPC::RLDCRo: III.ImmOpcode = PPC::RLDICRo; break; + case PPC::RLDCR_rec: + III.ImmOpcode = PPC::RLDICR_rec; + break; case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break; - case PPC::SLDo: III.ImmOpcode = PPC::RLDICRo; break; + case PPC::SLD_rec: + III.ImmOpcode = PPC::RLDICR_rec; + break; case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break; - case PPC::SRDo: III.ImmOpcode = PPC::RLDICLo; break; + case PPC::SRD_rec: + III.ImmOpcode = PPC::RLDICL_rec; + break; case PPC::SRAD: III.ImmWidth = 6; III.TruncateImmTo = 0; III.ImmOpcode = PPC::SRADI; break; - case PPC::SRADo: + case PPC::SRAD_rec: III.ImmWidth = 6; III.TruncateImmTo = 0; - III.ImmOpcode = PPC::SRADIo; + III.ImmOpcode = PPC::SRADI_rec; break; } break; @@ -3757,16 +3779,16 @@ bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI, ForwardKilledOperandReg = MI.getOperand(ConstantOpNo).getReg(); unsigned Opc = MI.getOpcode(); - bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLWo || - Opc == PPC::SRW || Opc == PPC::SRWo || - Opc == PPC::SLW8 || Opc == PPC::SLW8o || - Opc == PPC::SRW8 || Opc == PPC::SRW8o; - bool SpecialShift64 = - Opc == PPC::SLD || Opc == PPC::SLDo || Opc == PPC::SRD || Opc == PPC::SRDo; - bool SetCR = Opc == PPC::SLWo || Opc == PPC::SRWo || - Opc == PPC::SLDo || Opc == PPC::SRDo; - bool RightShift = - Opc == PPC::SRW || Opc == PPC::SRWo || Opc == PPC::SRD || Opc == PPC::SRDo; + bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec || + Opc == PPC::SRW || Opc == PPC::SRW_rec || + Opc == PPC::SLW8 || Opc == PPC::SLW8_rec || + Opc == PPC::SRW8 || Opc == PPC::SRW8_rec; + bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec || + Opc == PPC::SRD || Opc == PPC::SRD_rec; + bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec || + Opc == PPC::SLD_rec || Opc == PPC::SRD_rec; + bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD || + Opc == PPC::SRD_rec; MI.setDesc(get(III.ImmOpcode)); if (ConstantOpNo == III.OpNoForForwarding) { @@ -3870,27 +3892,21 @@ int PPCInstrInfo::getRecordFormOpcode(unsigned Opcode) { // i.e. 0 to 31-th bits are same as 32-th bit. 
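(The bit numbering in the comment above is the PowerPC convention in which bit 0 is the most significant bit of the 64-bit register, so "bits 0 to 31 are the same as bit 32" means the value is the sign-extension of its low 32 bits. A standalone C++ predicate expressing that property, for illustration only; the name is not from LLVM.)

#include <cassert>
#include <cstdint>

// True iff the 64-bit value equals the sign-extension of its low 32 bits,
// i.e. the 32 high-order bits all match the sign bit of the low word.
static bool isSignExtendedFrom32(uint64_t V) {
  uint64_t Low = V & 0xFFFFFFFFu;
  uint64_t Ext = (Low & 0x80000000u) ? (Low | 0xFFFFFFFF00000000ULL) : Low;
  return V == Ext;
}

int main() {
  assert(isSignExtendedFrom32(0xFFFFFFFF80000000ULL));  // INT32_MIN, extended
  assert(isSignExtendedFrom32(0x000000007FFFFFFFULL));  // INT32_MAX, extended
  assert(!isSignExtendedFrom32(0x0000000080000000ULL)); // high bits do not match
  return 0;
}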
static bool isSignExtendingOp(const MachineInstr &MI) { int Opcode = MI.getOpcode(); - if (Opcode == PPC::LI || Opcode == PPC::LI8 || - Opcode == PPC::LIS || Opcode == PPC::LIS8 || - Opcode == PPC::SRAW || Opcode == PPC::SRAWo || - Opcode == PPC::SRAWI || Opcode == PPC::SRAWIo || - Opcode == PPC::LWA || Opcode == PPC::LWAX || - Opcode == PPC::LWA_32 || Opcode == PPC::LWAX_32 || - Opcode == PPC::LHA || Opcode == PPC::LHAX || - Opcode == PPC::LHA8 || Opcode == PPC::LHAX8 || - Opcode == PPC::LBZ || Opcode == PPC::LBZX || - Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 || - Opcode == PPC::LBZU || Opcode == PPC::LBZUX || - Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 || - Opcode == PPC::LHZ || Opcode == PPC::LHZX || - Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 || - Opcode == PPC::LHZU || Opcode == PPC::LHZUX || - Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 || - Opcode == PPC::EXTSB || Opcode == PPC::EXTSBo || - Opcode == PPC::EXTSH || Opcode == PPC::EXTSHo || - Opcode == PPC::EXTSB8 || Opcode == PPC::EXTSH8 || - Opcode == PPC::EXTSW || Opcode == PPC::EXTSWo || - Opcode == PPC::SETB || Opcode == PPC::SETB8 || + if (Opcode == PPC::LI || Opcode == PPC::LI8 || Opcode == PPC::LIS || + Opcode == PPC::LIS8 || Opcode == PPC::SRAW || Opcode == PPC::SRAW_rec || + Opcode == PPC::SRAWI || Opcode == PPC::SRAWI_rec || Opcode == PPC::LWA || + Opcode == PPC::LWAX || Opcode == PPC::LWA_32 || Opcode == PPC::LWAX_32 || + Opcode == PPC::LHA || Opcode == PPC::LHAX || Opcode == PPC::LHA8 || + Opcode == PPC::LHAX8 || Opcode == PPC::LBZ || Opcode == PPC::LBZX || + Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 || Opcode == PPC::LBZU || + Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 || + Opcode == PPC::LHZ || Opcode == PPC::LHZX || Opcode == PPC::LHZ8 || + Opcode == PPC::LHZX8 || Opcode == PPC::LHZU || Opcode == PPC::LHZUX || + Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 || Opcode == PPC::EXTSB || + Opcode == PPC::EXTSB_rec || Opcode == PPC::EXTSH || + Opcode == PPC::EXTSH_rec || Opcode == PPC::EXTSB8 || + Opcode == PPC::EXTSH8 || Opcode == PPC::EXTSW || + Opcode == PPC::EXTSW_rec || Opcode == PPC::SETB || Opcode == PPC::SETB8 || Opcode == PPC::EXTSH8_32_64 || Opcode == PPC::EXTSW_32_64 || Opcode == PPC::EXTSB8_32_64) return true; @@ -3898,8 +3914,8 @@ static bool isSignExtendingOp(const MachineInstr &MI) { if (Opcode == PPC::RLDICL && MI.getOperand(3).getImm() >= 33) return true; - if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo || - Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo) && + if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec || + Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) && MI.getOperand(3).getImm() > 0 && MI.getOperand(3).getImm() <= MI.getOperand(4).getImm()) return true; @@ -3922,52 +3938,46 @@ static bool isZeroExtendingOp(const MachineInstr &MI) { // We have some variations of rotate-and-mask instructions // that clear higher 32-bits. 
- if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICLo || - Opcode == PPC::RLDCL || Opcode == PPC::RLDCLo || + if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec || + Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec || Opcode == PPC::RLDICL_32_64) && MI.getOperand(3).getImm() >= 32) return true; - if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDICo) && + if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) && MI.getOperand(3).getImm() >= 32 && MI.getOperand(3).getImm() <= 63 - MI.getOperand(2).getImm()) return true; - if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo || - Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo || + if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec || + Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec || Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) && MI.getOperand(3).getImm() <= MI.getOperand(4).getImm()) return true; // There are other instructions that clear higher 32-bits. - if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZWo || - Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZWo || + if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZW_rec || + Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZW_rec || Opcode == PPC::CNTLZW8 || Opcode == PPC::CNTTZW8 || - Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZDo || - Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZDo || - Opcode == PPC::POPCNTD || Opcode == PPC::POPCNTW || - Opcode == PPC::SLW || Opcode == PPC::SLWo || - Opcode == PPC::SRW || Opcode == PPC::SRWo || - Opcode == PPC::SLW8 || Opcode == PPC::SRW8 || - Opcode == PPC::SLWI || Opcode == PPC::SLWIo || - Opcode == PPC::SRWI || Opcode == PPC::SRWIo || - Opcode == PPC::LWZ || Opcode == PPC::LWZX || - Opcode == PPC::LWZU || Opcode == PPC::LWZUX || - Opcode == PPC::LWBRX || Opcode == PPC::LHBRX || - Opcode == PPC::LHZ || Opcode == PPC::LHZX || - Opcode == PPC::LHZU || Opcode == PPC::LHZUX || - Opcode == PPC::LBZ || Opcode == PPC::LBZX || - Opcode == PPC::LBZU || Opcode == PPC::LBZUX || - Opcode == PPC::LWZ8 || Opcode == PPC::LWZX8 || - Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8 || - Opcode == PPC::LWBRX8 || Opcode == PPC::LHBRX8 || - Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 || - Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 || - Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 || - Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 || - Opcode == PPC::ANDIo || Opcode == PPC::ANDISo || - Opcode == PPC::ROTRWI || Opcode == PPC::ROTRWIo || - Opcode == PPC::EXTLWI || Opcode == PPC::EXTLWIo || + Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZD_rec || + Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZD_rec || + Opcode == PPC::POPCNTD || Opcode == PPC::POPCNTW || Opcode == PPC::SLW || + Opcode == PPC::SLW_rec || Opcode == PPC::SRW || Opcode == PPC::SRW_rec || + Opcode == PPC::SLW8 || Opcode == PPC::SRW8 || Opcode == PPC::SLWI || + Opcode == PPC::SLWI_rec || Opcode == PPC::SRWI || + Opcode == PPC::SRWI_rec || Opcode == PPC::LWZ || Opcode == PPC::LWZX || + Opcode == PPC::LWZU || Opcode == PPC::LWZUX || Opcode == PPC::LWBRX || + Opcode == PPC::LHBRX || Opcode == PPC::LHZ || Opcode == PPC::LHZX || + Opcode == PPC::LHZU || Opcode == PPC::LHZUX || Opcode == PPC::LBZ || + Opcode == PPC::LBZX || Opcode == PPC::LBZU || Opcode == PPC::LBZUX || + Opcode == PPC::LWZ8 || Opcode == PPC::LWZX8 || Opcode == PPC::LWZU8 || + Opcode == PPC::LWZUX8 || Opcode == PPC::LWBRX8 || Opcode == PPC::LHBRX8 || + Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 || Opcode == PPC::LHZU8 || + Opcode == PPC::LHZUX8 || Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 || + Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 || + 
Opcode == PPC::ANDI_rec || Opcode == PPC::ANDIS_rec || + Opcode == PPC::ROTRWI || Opcode == PPC::ROTRWI_rec || + Opcode == PPC::EXTLWI || Opcode == PPC::EXTLWI_rec || Opcode == PPC::MFVSRWZ) return true; @@ -4061,14 +4071,14 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, return false; } - case PPC::ANDIo: - case PPC::ANDISo: + case PPC::ANDI_rec: + case PPC::ANDIS_rec: case PPC::ORI: case PPC::ORIS: case PPC::XORI: case PPC::XORIS: - case PPC::ANDI8o: - case PPC::ANDIS8o: + case PPC::ANDI8_rec: + case PPC::ANDIS8_rec: case PPC::ORI8: case PPC::ORIS8: case PPC::XORI8: diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td index b02e676bc23f..986313e3154a 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td @@ -486,7 +486,7 @@ def mul_without_simm16 : BinOpWithoutSImm16Operand; // PowerPC Flag Definitions. class isPPC64 { bit PPC64 = 1; } -class isDOT { bit RC = 1; } +class isRecordForm { bit RC = 1; } class RegConstraint { string Constraints = C; @@ -961,9 +961,9 @@ multiclass XForm_6r opcode, bits<10> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : XForm_6, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -976,9 +976,9 @@ multiclass XForm_6rc opcode, bits<10> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CARRY, CR0] in - def o : XForm_6, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -991,9 +991,9 @@ multiclass XForm_10rc opcode, bits<10> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CARRY, CR0] in - def o : XForm_10, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1005,9 +1005,9 @@ multiclass XForm_11r opcode, bits<10> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : XForm_11, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1019,9 +1019,9 @@ multiclass XOForm_1r opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : XOForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1035,9 +1035,9 @@ multiclass XOForm_1rx opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : XOForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } let BaseName = !strconcat(asmbase, "O") in { let Defs = [XER] in @@ -1045,9 +1045,9 @@ multiclass XOForm_1rx opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat("o ", asmstr)), itin, []>, RecFormRel; let Defs = [XER, CR0] in - def Oo : XOForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1061,9 +1061,9 @@ multiclass XOForm_1rcr opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : XOForm_1, isDOT, RecFormRel, PPC970_DGroup_First, + []>, isRecordForm, RecFormRel, PPC970_DGroup_First, PPC970_DGroup_Cracked; } let BaseName = !strconcat(asmbase, "O") in { @@ -1072,9 +1072,9 @@ multiclass XOForm_1rcr opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat("o ", asmstr)), itin, []>, RecFormRel; let Defs = [XER, CR0] in - def Oo : XOForm_1, isDOT, RecFormRel; + []>, isRecordForm, 
RecFormRel; } } @@ -1087,9 +1087,9 @@ multiclass XOForm_1rc opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CARRY, CR0] in - def o : XOForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } let BaseName = !strconcat(asmbase, "O") in { let Defs = [CARRY, XER] in @@ -1097,9 +1097,9 @@ multiclass XOForm_1rc opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat("o ", asmstr)), itin, []>, RecFormRel; let Defs = [CARRY, XER, CR0] in - def Oo : XOForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1111,9 +1111,9 @@ multiclass XOForm_3r opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : XOForm_3, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } let BaseName = !strconcat(asmbase, "O") in { let Defs = [XER] in @@ -1121,9 +1121,9 @@ multiclass XOForm_3r opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat("o ", asmstr)), itin, []>, RecFormRel; let Defs = [XER, CR0] in - def Oo : XOForm_3, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1136,9 +1136,9 @@ multiclass XOForm_3rc opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CARRY, CR0] in - def o : XOForm_3, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } let BaseName = !strconcat(asmbase, "O") in { let Defs = [CARRY, XER] in @@ -1146,9 +1146,9 @@ multiclass XOForm_3rc opcode, bits<9> xo, bit oe, dag OOL, dag IOL, !strconcat(asmbase, !strconcat("o ", asmstr)), itin, []>, RecFormRel; let Defs = [CARRY, XER, CR0] in - def Oo : XOForm_3, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1160,9 +1160,9 @@ multiclass MForm_2r opcode, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : MForm_2, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1174,9 +1174,9 @@ multiclass MDForm_1r opcode, bits<3> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : MDForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1188,9 +1188,9 @@ multiclass MDSForm_1r opcode, bits<4> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : MDSForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1203,9 +1203,9 @@ multiclass XSForm_1rc opcode, bits<9> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CARRY, CR0] in - def o : XSForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1217,9 +1217,9 @@ multiclass XSForm_1r opcode, bits<9> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR0] in - def o : XSForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1231,9 +1231,9 @@ multiclass XForm_26r opcode, bits<10> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR1] in - def o : XForm_26, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1245,9 +1245,9 @@ multiclass XForm_28r opcode, bits<10> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR1] in - def o : XForm_28, isDOT, RecFormRel; + []>, isRecordForm, 
RecFormRel; } } @@ -1259,9 +1259,9 @@ multiclass AForm_1r opcode, bits<5> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR1] in - def o : AForm_1, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1273,9 +1273,9 @@ multiclass AForm_2r opcode, bits<5> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR1] in - def o : AForm_2, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1287,9 +1287,9 @@ multiclass AForm_3r opcode, bits<5> xo, dag OOL, dag IOL, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, pattern>, RecFormRel; let Defs = [CR1] in - def o : AForm_3, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } } @@ -1914,15 +1914,15 @@ def LWARX : XForm_1_memOp<31, 20, (outs gprc:$rD), (ins memrr:$src), // Instructions to support lock versions of atomics // (EH=1 - see Power ISA 2.07 Book II 4.4.2) def LBARXL : XForm_1_memOp<31, 52, (outs gprc:$rD), (ins memrr:$src), - "lbarx $rD, $src, 1", IIC_LdStLWARX, []>, isDOT, + "lbarx $rD, $src, 1", IIC_LdStLWARX, []>, isRecordForm, Requires<[HasPartwordAtomics]>; def LHARXL : XForm_1_memOp<31, 116, (outs gprc:$rD), (ins memrr:$src), - "lharx $rD, $src, 1", IIC_LdStLWARX, []>, isDOT, + "lharx $rD, $src, 1", IIC_LdStLWARX, []>, isRecordForm, Requires<[HasPartwordAtomics]>; def LWARXL : XForm_1_memOp<31, 20, (outs gprc:$rD), (ins memrr:$src), - "lwarx $rD, $src, 1", IIC_LdStLWARX, []>, isDOT; + "lwarx $rD, $src, 1", IIC_LdStLWARX, []>, isRecordForm; // The atomic instructions use the destination register as well as the next one // or two registers in order (modulo 31). @@ -1935,14 +1935,14 @@ def LWAT : X_RD5_RS5_IM5<31, 582, (outs gprc:$rD), (ins gprc:$rA, u5imm:$FC), let Defs = [CR0], mayStore = 1, mayLoad = 0, hasSideEffects = 0 in { def STBCX : XForm_1_memOp<31, 694, (outs), (ins gprc:$rS, memrr:$dst), "stbcx. $rS, $dst", IIC_LdStSTWCX, []>, - isDOT, Requires<[HasPartwordAtomics]>; + isRecordForm, Requires<[HasPartwordAtomics]>; def STHCX : XForm_1_memOp<31, 726, (outs), (ins gprc:$rS, memrr:$dst), "sthcx. $rS, $dst", IIC_LdStSTWCX, []>, - isDOT, Requires<[HasPartwordAtomics]>; + isRecordForm, Requires<[HasPartwordAtomics]>; def STWCX : XForm_1_memOp<31, 150, (outs), (ins gprc:$rS, memrr:$dst), - "stwcx. $rS, $dst", IIC_LdStSTWCX, []>, isDOT; + "stwcx. $rS, $dst", IIC_LdStSTWCX, []>, isRecordForm; } let mayStore = 1, mayLoad = 0, hasSideEffects = 0 in @@ -2298,9 +2298,9 @@ def ADDIC : DForm_2<12, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), [(set i32:$rD, (addc i32:$rA, imm32SExt16:$imm))]>, RecFormRel, PPC970_DGroup_Cracked; let Defs = [CARRY, CR0] in -def ADDICo : DForm_2<13, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), +def ADDIC_rec : DForm_2<13, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm), "addic. $rD, $rA, $imm", IIC_IntGeneral, - []>, isDOT, RecFormRel; + []>, isRecordForm, RecFormRel; } def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, s17imm:$imm), "addis $rD, $rA, $imm", IIC_IntSimple, @@ -2330,14 +2330,14 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { let PPC970_Unit = 1 in { // FXU Operations. let Defs = [CR0] in { -def ANDIo : DForm_4<28, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), +def ANDI_rec : DForm_4<28, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "andi. 
$dst, $src1, $src2", IIC_IntGeneral, [(set i32:$dst, (and i32:$src1, immZExt16:$src2))]>, - isDOT; -def ANDISo : DForm_4<29, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), + isRecordForm; +def ANDIS_rec : DForm_4<29, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "andis. $dst, $src1, $src2", IIC_IntGeneral, [(set i32:$dst, (and i32:$src1, imm16ShiftedZExt:$src2))]>, - isDOT; + isRecordForm; } def ORI : DForm_4<24, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2), "ori $dst, $src1, $src2", IIC_IntSimple, @@ -2811,8 +2811,8 @@ let Uses = [RM] in { PPC970_DGroup_Single, PPC970_Unit_FPU; let Defs = [CR1] in - def MFFSo : XForm_42<63, 583, (outs f8rc:$rT), (ins), - "mffs. $rT", IIC_IntMFFS, []>, isDOT; + def MFFS_rec : XForm_42<63, 583, (outs f8rc:$rT), (ins), + "mffs. $rT", IIC_IntMFFS, []>, isRecordForm; def MFFSCE : X_FRT5_XO2_XO3_XO10<63, 0, 1, 583, (outs f8rc:$rT), (ins), "mffsce $rT", IIC_IntMFFS, []>, @@ -3053,10 +3053,10 @@ def RLWINM : MForm_2<21, "rlwinm $rA, $rS, $SH, $MB, $ME", IIC_IntGeneral, []>, RecFormRel; let Defs = [CR0] in -def RLWINMo : MForm_2<21, +def RLWINM_rec : MForm_2<21, (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME), "rlwinm. $rA, $rS, $SH, $MB, $ME", IIC_IntGeneral, - []>, isDOT, RecFormRel, PPC970_DGroup_Cracked; + []>, isRecordForm, RecFormRel, PPC970_DGroup_Cracked; } defm RLWNM : MForm_2r<23, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB, u5imm:$MB, u5imm:$ME), @@ -4084,24 +4084,24 @@ def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETUGT)), def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETNE)), (SELECT_VRRC (CRXOR $lhs, $rhs), $tval, $fval)>; -def ANDIo_1_EQ_BIT : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins gprc:$in), - "#ANDIo_1_EQ_BIT", +def ANDI_rec_1_EQ_BIT : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins gprc:$in), + "#ANDI_rec_1_EQ_BIT", [(set i1:$dst, (trunc (not i32:$in)))]>; -def ANDIo_1_GT_BIT : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins gprc:$in), - "#ANDIo_1_GT_BIT", +def ANDI_rec_1_GT_BIT : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins gprc:$in), + "#ANDI_rec_1_GT_BIT", [(set i1:$dst, (trunc i32:$in))]>; -def ANDIo_1_EQ_BIT8 : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins g8rc:$in), - "#ANDIo_1_EQ_BIT8", +def ANDI_rec_1_EQ_BIT8 : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins g8rc:$in), + "#ANDI_rec_1_EQ_BIT8", [(set i1:$dst, (trunc (not i64:$in)))]>; -def ANDIo_1_GT_BIT8 : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins g8rc:$in), - "#ANDIo_1_GT_BIT8", +def ANDI_rec_1_GT_BIT8 : PPCCustomInserterPseudo<(outs crbitrc:$dst), (ins g8rc:$in), + "#ANDI_rec_1_GT_BIT8", [(set i1:$dst, (trunc i64:$in))]>; def : Pat<(i1 (not (trunc i32:$in))), - (ANDIo_1_EQ_BIT $in)>; + (ANDI_rec_1_EQ_BIT $in)>; def : Pat<(i1 (not (trunc i64:$in))), - (ANDIo_1_EQ_BIT8 $in)>; + (ANDI_rec_1_EQ_BIT8 $in)>; //===----------------------------------------------------------------------===// // PowerPC Instructions used for assembler/disassembler only @@ -4185,22 +4185,22 @@ def MCRFS : XLForm_3<63, 64, (outs crrc:$BF), (ins crrc:$BFA), def MTFSFI : XLForm_4<63, 134, (outs crrc:$BF), (ins i32imm:$U, i32imm:$W), "mtfsfi $BF, $U, $W", IIC_IntMFFS>; -def MTFSFIo : XLForm_4<63, 134, (outs crrc:$BF), (ins i32imm:$U, i32imm:$W), - "mtfsfi. $BF, $U, $W", IIC_IntMFFS>, isDOT; +def MTFSFI_rec : XLForm_4<63, 134, (outs crrc:$BF), (ins i32imm:$U, i32imm:$W), + "mtfsfi. 
$BF, $U, $W", IIC_IntMFFS>, isRecordForm; def : InstAlias<"mtfsfi $BF, $U", (MTFSFI crrc:$BF, i32imm:$U, 0)>; -def : InstAlias<"mtfsfi. $BF, $U", (MTFSFIo crrc:$BF, i32imm:$U, 0)>; +def : InstAlias<"mtfsfi. $BF, $U", (MTFSFI_rec crrc:$BF, i32imm:$U, 0)>; let Predicates = [HasFPU] in { def MTFSF : XFLForm_1<63, 711, (outs), (ins i32imm:$FLM, f8rc:$FRB, i32imm:$L, i32imm:$W), "mtfsf $FLM, $FRB, $L, $W", IIC_IntMFFS, []>; -def MTFSFo : XFLForm_1<63, 711, (outs), +def MTFSF_rec : XFLForm_1<63, 711, (outs), (ins i32imm:$FLM, f8rc:$FRB, i32imm:$L, i32imm:$W), - "mtfsf. $FLM, $FRB, $L, $W", IIC_IntMFFS, []>, isDOT; + "mtfsf. $FLM, $FRB, $L, $W", IIC_IntMFFS, []>, isRecordForm; def : InstAlias<"mtfsf $FLM, $FRB", (MTFSF i32imm:$FLM, f8rc:$FRB, 0, 0)>; -def : InstAlias<"mtfsf. $FLM, $FRB", (MTFSFo i32imm:$FLM, f8rc:$FRB, 0, 0)>; +def : InstAlias<"mtfsf. $FLM, $FRB", (MTFSF_rec i32imm:$FLM, f8rc:$FRB, 0, 0)>; } def SLBIE : XForm_16b<31, 434, (outs), (ins gprc:$RB), @@ -4218,8 +4218,8 @@ def SLBMFEV : XLForm_1_gen<31, 851, (outs gprc:$RT), (ins gprc:$RB), def SLBIA : XForm_0<31, 498, (outs), (ins), "slbia", IIC_SprSLBIA, []>; let Defs = [CR0] in -def SLBFEEo : XForm_26<31, 979, (outs gprc:$RT), (ins gprc:$RB), - "slbfee. $RT, $RB", IIC_SprSLBFEE, []>, isDOT; +def SLBFEE_rec : XForm_26<31, 979, (outs gprc:$RT), (ins gprc:$RB), + "slbfee. $RT, $RB", IIC_SprSLBFEE, []>, isRecordForm; def TLBIA : XForm_0<31, 370, (outs), (ins), "tlbia", IIC_SprTLBIA, []>; @@ -4262,7 +4262,7 @@ def TLBSX2 : XForm_base_r3xo<31, 914, (outs), (ins gprc:$RST, gprc:$A, gprc:$B), def TLBSX2D : XForm_base_r3xo<31, 914, (outs), (ins gprc:$RST, gprc:$A, gprc:$B), "tlbsx. $RST, $A, $B", IIC_LdStLoad, []>, - Requires<[IsPPC4xx]>, isDOT; + Requires<[IsPPC4xx]>, isRecordForm; def RFID : XForm_0<19, 18, (outs), (ins), "rfid", IIC_IntRFID, []>; @@ -4480,10 +4480,10 @@ def : InstAlias<"mttbhi $Rx", (MTSPR 988, gprc:$Rx)>, Requires<[IsPPC4xx]>; def : InstAlias<"xnop", (XORI R0, R0, 0)>; def : InstAlias<"mr $rA, $rB", (OR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>; -def : InstAlias<"mr. $rA, $rB", (OR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>; +def : InstAlias<"mr. $rA, $rB", (OR8_rec g8rc:$rA, g8rc:$rB, g8rc:$rB)>; def : InstAlias<"not $rA, $rB", (NOR8 g8rc:$rA, g8rc:$rB, g8rc:$rB)>; -def : InstAlias<"not. $rA, $rB", (NOR8o g8rc:$rA, g8rc:$rB, g8rc:$rB)>; +def : InstAlias<"not. $rA, $rB", (NOR8_rec g8rc:$rA, g8rc:$rB, g8rc:$rB)>; def : InstAlias<"mtcr $rA", (MTCRF8 255, g8rc:$rA)>; @@ -4549,13 +4549,13 @@ def SUBIS : PPCAsmPseudo<"subis $rA, $rB, $imm", (ins gprc:$rA, gprc:$rB, s16imm:$imm)>; def SUBIC : PPCAsmPseudo<"subic $rA, $rB, $imm", (ins gprc:$rA, gprc:$rB, s16imm:$imm)>; -def SUBICo : PPCAsmPseudo<"subic. $rA, $rB, $imm", +def SUBIC_rec : PPCAsmPseudo<"subic. $rA, $rB, $imm", (ins gprc:$rA, gprc:$rB, s16imm:$imm)>; def : InstAlias<"sub $rA, $rB, $rC", (SUBF8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>; -def : InstAlias<"sub. $rA, $rB, $rC", (SUBF8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>; +def : InstAlias<"sub. $rA, $rB, $rC", (SUBF8_rec g8rc:$rA, g8rc:$rC, g8rc:$rB)>; def : InstAlias<"subc $rA, $rB, $rC", (SUBFC8 g8rc:$rA, g8rc:$rC, g8rc:$rB)>; -def : InstAlias<"subc. $rA, $rB, $rC", (SUBFC8o g8rc:$rA, g8rc:$rC, g8rc:$rB)>; +def : InstAlias<"subc. 
$rA, $rB, $rC", (SUBFC8_rec g8rc:$rA, g8rc:$rC, g8rc:$rB)>; def : InstAlias<"mtmsrd $RS", (MTMSRD gprc:$RS, 0)>; def : InstAlias<"mtmsr $RS", (MTMSR gprc:$RS, 0)>; @@ -4608,109 +4608,109 @@ def : InstAlias<"tlbwelo $RS, $A", (TLBWE2 gprc:$RS, gprc:$A, 1)>, def EXTLWI : PPCAsmPseudo<"extlwi $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; -def EXTLWIo : PPCAsmPseudo<"extlwi. $rA, $rS, $n, $b", +def EXTLWI_rec : PPCAsmPseudo<"extlwi. $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; def EXTRWI : PPCAsmPseudo<"extrwi $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; -def EXTRWIo : PPCAsmPseudo<"extrwi. $rA, $rS, $n, $b", +def EXTRWI_rec : PPCAsmPseudo<"extrwi. $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; def INSLWI : PPCAsmPseudo<"inslwi $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; -def INSLWIo : PPCAsmPseudo<"inslwi. $rA, $rS, $n, $b", +def INSLWI_rec : PPCAsmPseudo<"inslwi. $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; def INSRWI : PPCAsmPseudo<"insrwi $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; -def INSRWIo : PPCAsmPseudo<"insrwi. $rA, $rS, $n, $b", +def INSRWI_rec : PPCAsmPseudo<"insrwi. $rA, $rS, $n, $b", (ins gprc:$rA, gprc:$rS, u5imm:$n, u5imm:$b)>; def ROTRWI : PPCAsmPseudo<"rotrwi $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; -def ROTRWIo : PPCAsmPseudo<"rotrwi. $rA, $rS, $n", +def ROTRWI_rec : PPCAsmPseudo<"rotrwi. $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; def SLWI : PPCAsmPseudo<"slwi $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; -def SLWIo : PPCAsmPseudo<"slwi. $rA, $rS, $n", +def SLWI_rec : PPCAsmPseudo<"slwi. $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; def SRWI : PPCAsmPseudo<"srwi $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; -def SRWIo : PPCAsmPseudo<"srwi. $rA, $rS, $n", +def SRWI_rec : PPCAsmPseudo<"srwi. $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; def CLRRWI : PPCAsmPseudo<"clrrwi $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; -def CLRRWIo : PPCAsmPseudo<"clrrwi. $rA, $rS, $n", +def CLRRWI_rec : PPCAsmPseudo<"clrrwi. $rA, $rS, $n", (ins gprc:$rA, gprc:$rS, u5imm:$n)>; def CLRLSLWI : PPCAsmPseudo<"clrlslwi $rA, $rS, $b, $n", (ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>; -def CLRLSLWIo : PPCAsmPseudo<"clrlslwi. $rA, $rS, $b, $n", +def CLRLSLWI_rec : PPCAsmPseudo<"clrlslwi. $rA, $rS, $b, $n", (ins gprc:$rA, gprc:$rS, u5imm:$b, u5imm:$n)>; def : InstAlias<"rotlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>; -def : InstAlias<"rotlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>; +def : InstAlias<"rotlwi. $rA, $rS, $n", (RLWINM_rec gprc:$rA, gprc:$rS, u5imm:$n, 0, 31)>; def : InstAlias<"rotlw $rA, $rS, $rB", (RLWNM gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>; -def : InstAlias<"rotlw. $rA, $rS, $rB", (RLWNMo gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>; +def : InstAlias<"rotlw. $rA, $rS, $rB", (RLWNM_rec gprc:$rA, gprc:$rS, gprc:$rB, 0, 31)>; def : InstAlias<"clrlwi $rA, $rS, $n", (RLWINM gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>; -def : InstAlias<"clrlwi. $rA, $rS, $n", (RLWINMo gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>; +def : InstAlias<"clrlwi. $rA, $rS, $n", (RLWINM_rec gprc:$rA, gprc:$rS, 0, u5imm:$n, 31)>; def : InstAlias<"cntlzw $rA, $rS", (CNTLZW gprc:$rA, gprc:$rS)>; -def : InstAlias<"cntlzw. $rA, $rS", (CNTLZWo gprc:$rA, gprc:$rS)>; +def : InstAlias<"cntlzw. 
$rA, $rS", (CNTLZW_rec gprc:$rA, gprc:$rS)>; // The POWER variant def : MnemonicAlias<"cntlz", "cntlzw">; def : MnemonicAlias<"cntlz.", "cntlzw.">; def EXTLDI : PPCAsmPseudo<"extldi $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; -def EXTLDIo : PPCAsmPseudo<"extldi. $rA, $rS, $n, $b", +def EXTLDI_rec : PPCAsmPseudo<"extldi. $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; def EXTRDI : PPCAsmPseudo<"extrdi $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; -def EXTRDIo : PPCAsmPseudo<"extrdi. $rA, $rS, $n, $b", +def EXTRDI_rec : PPCAsmPseudo<"extrdi. $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; def INSRDI : PPCAsmPseudo<"insrdi $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; -def INSRDIo : PPCAsmPseudo<"insrdi. $rA, $rS, $n, $b", +def INSRDI_rec : PPCAsmPseudo<"insrdi. $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u6imm:$n, u6imm:$b)>; def ROTRDI : PPCAsmPseudo<"rotrdi $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; -def ROTRDIo : PPCAsmPseudo<"rotrdi. $rA, $rS, $n", +def ROTRDI_rec : PPCAsmPseudo<"rotrdi. $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; def SLDI : PPCAsmPseudo<"sldi $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; -def SLDIo : PPCAsmPseudo<"sldi. $rA, $rS, $n", +def SLDI_rec : PPCAsmPseudo<"sldi. $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; def SRDI : PPCAsmPseudo<"srdi $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; -def SRDIo : PPCAsmPseudo<"srdi. $rA, $rS, $n", +def SRDI_rec : PPCAsmPseudo<"srdi. $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; def CLRRDI : PPCAsmPseudo<"clrrdi $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; -def CLRRDIo : PPCAsmPseudo<"clrrdi. $rA, $rS, $n", +def CLRRDI_rec : PPCAsmPseudo<"clrrdi. $rA, $rS, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$n)>; def CLRLSLDI : PPCAsmPseudo<"clrlsldi $rA, $rS, $b, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>; -def CLRLSLDIo : PPCAsmPseudo<"clrlsldi. $rA, $rS, $b, $n", +def CLRLSLDI_rec : PPCAsmPseudo<"clrlsldi. $rA, $rS, $b, $n", (ins g8rc:$rA, g8rc:$rS, u6imm:$b, u6imm:$n)>; def SUBPCIS : PPCAsmPseudo<"subpcis $RT, $D", (ins g8rc:$RT, s16imm:$D)>; def : InstAlias<"rotldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>; -def : InstAlias<"rotldi. $rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>; +def : InstAlias<"rotldi. $rA, $rS, $n", (RLDICL_rec g8rc:$rA, g8rc:$rS, u6imm:$n, 0)>; def : InstAlias<"rotld $rA, $rS, $rB", (RLDCL g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>; -def : InstAlias<"rotld. $rA, $rS, $rB", (RLDCLo g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>; +def : InstAlias<"rotld. $rA, $rS, $rB", (RLDCL_rec g8rc:$rA, g8rc:$rS, gprc:$rB, 0)>; def : InstAlias<"clrldi $rA, $rS, $n", (RLDICL g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>; def : InstAlias<"clrldi $rA, $rS, $n", (RLDICL_32_64 g8rc:$rA, gprc:$rS, 0, u6imm:$n)>; -def : InstAlias<"clrldi. $rA, $rS, $n", (RLDICLo g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>; +def : InstAlias<"clrldi. $rA, $rS, $n", (RLDICL_rec g8rc:$rA, g8rc:$rS, 0, u6imm:$n)>; def : InstAlias<"lnia $RT", (ADDPCIS g8rc:$RT, 0)>; def RLWINMbm : PPCAsmPseudo<"rlwinm $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>; -def RLWINMobm : PPCAsmPseudo<"rlwinm. $rA, $rS, $n, $b", +def RLWINMbm_rec : PPCAsmPseudo<"rlwinm. $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>; def RLWIMIbm : PPCAsmPseudo<"rlwimi $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>; -def RLWIMIobm : PPCAsmPseudo<"rlwimi. $rA, $rS, $n, $b", +def RLWIMIbm_rec : PPCAsmPseudo<"rlwimi. 
$rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>; def RLWNMbm : PPCAsmPseudo<"rlwnm $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>; -def RLWNMobm : PPCAsmPseudo<"rlwnm. $rA, $rS, $n, $b", +def RLWNMbm_rec : PPCAsmPseudo<"rlwnm. $rA, $rS, $n, $b", (ins g8rc:$rA, g8rc:$rS, u5imm:$n, i32imm:$b)>; // These generic branch instruction forms are used for the assembler parser only. @@ -4939,7 +4939,7 @@ let mayStore = 1 in def CP_PASTE : X_L1_RA5_RB5<31, 902, "paste" , gprc, IIC_LdStPASTE, []>; let mayStore = 1, Defs = [CR0] in -def CP_PASTEo : X_L1_RA5_RB5<31, 902, "paste.", gprc, IIC_LdStPASTE, []>, isDOT; +def CP_PASTE_rec : X_L1_RA5_RB5<31, 902, "paste.", gprc, IIC_LdStPASTE, []>, isRecordForm; def CP_COPYx : PPCAsmPseudo<"copy $rA, $rB" , (ins gprc:$rA, gprc:$rB)>; def CP_PASTEx : PPCAsmPseudo<"paste $rA, $rB", (ins gprc:$rA, gprc:$rB)>; diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td index e30a2ed020f0..be6b30ffa08b 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td +++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td @@ -120,11 +120,11 @@ multiclass XX3Form_Rcr opcode, bits<7> xo, string asmbase, !strconcat(asmbase, !strconcat(" ", asmstr)), itin, [(set OutTy:$XT, (Int InTy:$XA, InTy:$XB))]>; let Defs = [CR6] in - def o : XX3Form_Rc, - isDOT; + isRecordForm; } } @@ -1961,7 +1961,7 @@ def VectorExtractions { - The order of elements after the move to GPR is reversed, so we invert the bits of the index prior to truncating to the range 0-7 */ - dag BE_VBYTE_PERM_VEC = (v16i8 (LVSL ZERO8, (ANDI8o $Idx, 8))); + dag BE_VBYTE_PERM_VEC = (v16i8 (LVSL ZERO8, (ANDI8_rec $Idx, 8))); dag BE_VBYTE_PERMUTE = (v16i8 (VPERM $S, $S, BE_VBYTE_PERM_VEC)); dag BE_MV_VBYTE = (MFVSRD (EXTRACT_SUBREG @@ -1980,7 +1980,7 @@ def VectorExtractions { the bits of the index prior to truncating to the range 0-3 */ dag BE_VHALF_PERM_VEC = (v16i8 (LVSL ZERO8, - (RLDICR (ANDI8o $Idx, 4), 1, 62))); + (RLDICR (ANDI8_rec $Idx, 4), 1, 62))); dag BE_VHALF_PERMUTE = (v16i8 (VPERM $S, $S, BE_VHALF_PERM_VEC)); dag BE_MV_VHALF = (MFVSRD (EXTRACT_SUBREG @@ -1998,7 +1998,7 @@ def VectorExtractions { the bits of the index prior to truncating to the range 0-1 */ dag BE_VWORD_PERM_VEC = (v16i8 (LVSL ZERO8, - (RLDICR (ANDI8o $Idx, 2), 2, 61))); + (RLDICR (ANDI8_rec $Idx, 2), 2, 61))); dag BE_VWORD_PERMUTE = (v16i8 (VPERM $S, $S, BE_VWORD_PERM_VEC)); dag BE_MV_VWORD = (MFVSRD (EXTRACT_SUBREG @@ -2014,7 +2014,7 @@ def VectorExtractions { element indices. 
*/ dag BE_VDWORD_PERM_VEC = (v16i8 (LVSL ZERO8, - (RLDICR (ANDI8o $Idx, 1), 3, 60))); + (RLDICR (ANDI8_rec $Idx, 1), 3, 60))); dag BE_VDWORD_PERMUTE = (v16i8 (VPERM $S, $S, BE_VDWORD_PERM_VEC)); dag BE_VARIABLE_DWORD = (MFVSRD (EXTRACT_SUBREG @@ -2588,7 +2588,7 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in { // [PO VRT XO VRB XO RO], Round to Odd version of [PO VRT XO VRB XO /] class X_VT5_XO5_VB5_Ro opcode, bits<5> xo2, bits<10> xo, string opc, list pattern> - : X_VT5_XO5_VB5, isDOT; + : X_VT5_XO5_VB5, isRecordForm; // [PO VRT XO VRB XO /], but the VRB is only used the left 64 bits (or less), // So we use different operand class for VRB @@ -2606,7 +2606,7 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in { // [PO VRT XO VRB XO RO], Round to Odd version of [PO VRT XO VRB XO /] class X_VT5_XO5_VB5_VSFR_Ro opcode, bits<5> xo2, bits<10> xo, string opc, list pattern> - : X_VT5_XO5_VB5_VSFR, isDOT; + : X_VT5_XO5_VB5_VSFR, isRecordForm; // [PO T XO B XO BX /] class XX2_RT5_XO5_XB6 opcode, bits<5> xo2, bits<9> xo, string opc, @@ -2636,7 +2636,7 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in { // [PO VRT VRA VRB XO RO], Round to Odd version of [PO VRT VRA VRB XO /] class X_VT5_VA5_VB5_Ro opcode, bits<10> xo, string opc, list pattern> - : X_VT5_VA5_VB5, isDOT; + : X_VT5_VA5_VB5, isRecordForm; // [PO VRT VRA VRB XO /] class X_VT5_VA5_VB5_FMA opcode, bits<10> xo, string opc, @@ -2648,7 +2648,7 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in { // [PO VRT VRA VRB XO RO], Round to Odd version of [PO VRT VRA VRB XO /] class X_VT5_VA5_VB5_FMA_Ro opcode, bits<10> xo, string opc, list pattern> - : X_VT5_VA5_VB5_FMA, isDOT; + : X_VT5_VA5_VB5_FMA, isRecordForm; //===--------------------------------------------------------------------===// // Quad-Precision Scalar Move Instructions: diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp index 02a5cdcbcae6..1b67e1e55bf7 100644 --- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp +++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -162,33 +162,33 @@ static MachineInstr *getVRegDefOrNull(MachineOperand *Op, static unsigned getKnownLeadingZeroCount(MachineInstr *MI, const PPCInstrInfo *TII) { unsigned Opcode = MI->getOpcode(); - if (Opcode == PPC::RLDICL || Opcode == PPC::RLDICLo || - Opcode == PPC::RLDCL || Opcode == PPC::RLDCLo) + if (Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec || + Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec) return MI->getOperand(3).getImm(); - if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDICo) && - MI->getOperand(3).getImm() <= 63 - MI->getOperand(2).getImm()) + if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) && + MI->getOperand(3).getImm() <= 63 - MI->getOperand(2).getImm()) return MI->getOperand(3).getImm(); - if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo || - Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo || + if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec || + Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec || Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) && - MI->getOperand(3).getImm() <= MI->getOperand(4).getImm()) + MI->getOperand(3).getImm() <= MI->getOperand(4).getImm()) return 32 + MI->getOperand(3).getImm(); - if (Opcode == PPC::ANDIo) { + if (Opcode == PPC::ANDI_rec) { uint16_t Imm = MI->getOperand(2).getImm(); return 48 + countLeadingZeros(Imm); } - if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZWo || - Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZWo || + if (Opcode == PPC::CNTLZW || Opcode == 
PPC::CNTLZW_rec || + Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZW_rec || Opcode == PPC::CNTLZW8 || Opcode == PPC::CNTTZW8) // The result ranges from 0 to 32. return 58; - if (Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZDo || - Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZDo) + if (Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZD_rec || + Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZD_rec) // The result ranges from 0 to 64. return 57; @@ -821,18 +821,18 @@ bool PPCMIPeephole::simplifyCode(void) { break; } case PPC::RLWINM: - case PPC::RLWINMo: + case PPC::RLWINM_rec: case PPC::RLWINM8: - case PPC::RLWINM8o: { + case PPC::RLWINM8_rec: { unsigned FoldingReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(FoldingReg)) break; MachineInstr *SrcMI = MRI->getVRegDef(FoldingReg); if (SrcMI->getOpcode() != PPC::RLWINM && - SrcMI->getOpcode() != PPC::RLWINMo && + SrcMI->getOpcode() != PPC::RLWINM_rec && SrcMI->getOpcode() != PPC::RLWINM8 && - SrcMI->getOpcode() != PPC::RLWINM8o) + SrcMI->getOpcode() != PPC::RLWINM8_rec) break; assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() && MI.getOperand(4).isImm() && SrcMI->getOperand(2).isImm() && @@ -895,7 +895,7 @@ bool PPCMIPeephole::simplifyCode(void) { // If final mask is 0, MI result should be 0 too. if (FinalMask.isNullValue()) { bool Is64Bit = (MI.getOpcode() == PPC::RLWINM8 || - MI.getOpcode() == PPC::RLWINM8o); + MI.getOpcode() == PPC::RLWINM8_rec); LLVM_DEBUG(dbgs() << "Replace Instr: "); LLVM_DEBUG(MI.dump()); @@ -908,11 +908,11 @@ bool PPCMIPeephole::simplifyCode(void) { MI.getOperand(1).ChangeToImmediate(0); MI.setDesc(TII->get(Is64Bit ? PPC::LI8 : PPC::LI)); } else { - // Replace MI with "ANDIo reg, 0" + // Replace MI with "ANDI_rec reg, 0" MI.RemoveOperand(4); MI.RemoveOperand(3); MI.getOperand(2).setImm(0); - MI.setDesc(TII->get(Is64Bit ? PPC::ANDI8o : PPC::ANDIo)); + MI.setDesc(TII->get(Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec)); } Simplified = true; NumRotatesCollapsed++; @@ -925,8 +925,8 @@ bool PPCMIPeephole::simplifyCode(void) { // than NewME. Otherwise we get a 64 bit value after folding, but MI // return a 32 bit value. - // If FoldingReg has only one use and it it not RLWINMo and - // RLWINM8o, safe to delete its def SrcMI. Otherwise keep it. + // If FoldingReg has only one use and it it not RLWINM_rec and + // RLWINM8_rec, safe to delete its def SrcMI. Otherwise keep it. 
if (MRI->hasOneNonDBGUse(FoldingReg) && (SrcMI->getOpcode() == PPC::RLWINM || SrcMI->getOpcode() == PPC::RLWINM8)) { diff --git a/llvm/test/CodeGen/PowerPC/block-placement.mir b/llvm/test/CodeGen/PowerPC/block-placement.mir index 34c4f1c7b695..cb6ceb4066f7 100644 --- a/llvm/test/CodeGen/PowerPC/block-placement.mir +++ b/llvm/test/CodeGen/PowerPC/block-placement.mir @@ -149,7 +149,7 @@ body: | successors: %bb.4(0x04000000), %bb.10(0x7c000000) liveins: $r8, $x3, $x4, $x5, $x6, $x7 - dead renamable $r8 = ANDIo killed renamable $r8, 65535, implicit-def $cr0 + dead renamable $r8 = ANDI_rec killed renamable $r8, 65535, implicit-def $cr0 BCC 68, killed renamable $cr0, %bb.10 bb.4: diff --git a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir index 2e1bdc32e078..e14bc1f6ff10 100644 --- a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir +++ b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir @@ -24,7 +24,7 @@ } ; Function Attrs: norecurse nounwind readnone - define zeroext i32 @testRLWNMo(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { + define zeroext i32 @testRLWNM_rec(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { entry: %and = and i32 %a, 255 %tobool = icmp eq i32 %and, 0 @@ -33,7 +33,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLWNM8o(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLWNM8_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %a.tr = trunc i64 %a to i32 %0 = shl i32 %a.tr, 4 @@ -52,7 +52,7 @@ } ; Function Attrs: norecurse nounwind readnone - define zeroext i32 @testSLWo(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { + define zeroext i32 @testSLW_rec(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { entry: %shl = shl i32 %a, %b %tobool = icmp eq i32 %shl, 0 @@ -68,7 +68,7 @@ } ; Function Attrs: norecurse nounwind readnone - define zeroext i32 @testSRWo(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { + define zeroext i32 @testSRW_rec(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { entry: %shr = lshr i32 %a, %b %tobool = icmp eq i32 %shr, 0 @@ -84,7 +84,7 @@ } ; Function Attrs: norecurse nounwind readnone - define signext i32 @testSRAWo(i32 signext %a, i32 signext %b) local_unnamed_addr #0 { + define signext i32 @testSRAW_rec(i32 signext %a, i32 signext %b) local_unnamed_addr #0 { entry: %shr = ashr i32 %a, %b %tobool = icmp eq i32 %shr, 0 @@ -104,7 +104,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLDCLo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLDCL_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %and = and i64 %b, 63 %shl = shl i64 %a, %and @@ -128,7 +128,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLDCRo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLDCR_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %and = and i64 %b, 63 %shl = shl i64 %a, %and @@ -147,7 +147,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testSLDo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testSLD_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shl = shl i64 %a, %b %tobool = icmp eq i64 %shl, 0 @@ -163,7 +163,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testSRDo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testSRD_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shr = lshr i64 %a, %b %tobool = icmp eq i64 %shr, 0 @@ -179,7 +179,7 @@ } ; Function Attrs: 
norecurse nounwind readnone - define i64 @testSRADo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testSRAD_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shr = ashr i64 %a, %b %tobool = icmp eq i64 %shr, 0 @@ -311,8 +311,8 @@ body: | ... --- -name: testRLWNMo -# CHECK-ALL: name: testRLWNMo +name: testRLWNM_rec +# CHECK-ALL: name: testRLWNM_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -361,8 +361,8 @@ body: | %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -22 - %4 = RLWNMo %2, %3, 24, 31, implicit-def $cr0 - ; CHECK: RLWINMo %2, 10, 24, 31, implicit-def $cr0 + %4 = RLWNM_rec %2, %3, 24, 31, implicit-def $cr0 + ; CHECK: RLWINM_rec %2, 10, 24, 31, implicit-def $cr0 ; CHECK-LATE: li 3, -22 ; CHECK-LATE: rlwinm. 5, 4, 10, 24, 31 %5 = COPY killed $cr0 @@ -375,8 +375,8 @@ body: | ... --- -name: testRLWNM8o -# CHECK-ALL: name: testRLWNM8o +name: testRLWNM8_rec +# CHECK-ALL: name: testRLWNM8_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -423,8 +423,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI8 -18 - %3 = RLWNM8o %1, %2, 20, 27, implicit-def $cr0 - ; CHECK: RLWINM8o %1, 14, 20, 27, implicit-def $cr0 + %3 = RLWNM8_rec %1, %2, 20, 27, implicit-def $cr0 + ; CHECK: RLWINM8_rec %1, 14, 20, 27, implicit-def $cr0 ; CHECK-LATE: rlwinm. 3, 4, 14, 20, 27 %7 = COPY killed $cr0 %6 = RLDICL killed %3, 0, 32 @@ -491,8 +491,8 @@ body: | ... --- -name: testSLWo -# CHECK-ALL: name: testSLWo +name: testSLW_rec +# CHECK-ALL: name: testSLW_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -541,8 +541,8 @@ body: | %0 = COPY $x3 %2 = LI 35 %3 = COPY %0.sub_32 - %4 = SLWo %3, %2, implicit-def $cr0 - ; CHECK: ANDIo %3, 0, implicit-def $cr0 + %4 = SLW_rec %3, %2, implicit-def $cr0 + ; CHECK: ANDI_rec %3, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq @@ -611,8 +611,8 @@ body: | ... --- -name: testSRWo -# CHECK-ALL: name: testSRWo +name: testSRW_rec +# CHECK-ALL: name: testSRW_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -661,8 +661,8 @@ body: | %0 = COPY $x3 %2 = LI -7 %3 = COPY %0.sub_32 - %4 = SRWo %3, %2, implicit-def $cr0 - ; CHECK: ANDIo %3, 0, implicit-def $cr0 + %4 = SRW_rec %3, %2, implicit-def $cr0 + ; CHECK: ANDI_rec %3, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq @@ -730,8 +730,8 @@ body: | ... --- -name: testSRAWo -# CHECK-ALL: name: testSRAWo +name: testSRAW_rec +# CHECK-ALL: name: testSRAW_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -778,8 +778,8 @@ body: | %0 = COPY $x3 %2 = LI 80 %3 = COPY %0.sub_32 - %4 = SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0 - ; CHECK: SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0 + %4 = SRAW_rec killed %3, %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRAW_rec killed %3, %2, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: sraw. 3, 3, 4 %5 = COPY killed $cr0 %6 = ISEL %2, %4, %5.sub_eq @@ -842,8 +842,8 @@ body: | ... --- -name: testRLDCLo -# CHECK-ALL: name: testRLDCLo +name: testRLDCL_rec +# CHECK-ALL: name: testRLDCL_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -889,8 +889,8 @@ body: | %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI -37 - %4 = RLDCLo %0, killed %3, 0, implicit-def $cr0 - ; CHECK: RLDICLo %0, 27, 0, implicit-def $cr0 + %4 = RLDCL_rec %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICL_rec %0, 27, 0, implicit-def $cr0 ; CHECK-LATE: rldicl. 
5, 3, 27, 0 %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq @@ -952,8 +952,8 @@ body: | ... --- -name: testRLDCRo -# CHECK-ALL: name: testRLDCRo +name: testRLDCR_rec +# CHECK-ALL: name: testRLDCR_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -999,8 +999,8 @@ body: | %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI -18 - %4 = RLDCRo %0, killed %3, 0, implicit-def $cr0 - ; CHECK: RLDICRo %0, 46, 0, implicit-def $cr0 + %4 = RLDCR_rec %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICR_rec %0, 46, 0, implicit-def $cr0 ; CHECK-LATE: rldicr. 5, 3, 46, 0 %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq @@ -1060,8 +1060,8 @@ body: | ... --- -name: testSLDo -# CHECK-ALL: name: testSLDo +name: testSLD_rec +# CHECK-ALL: name: testSLD_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -1105,8 +1105,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI 88 - %3 = SLDo %0, killed %2, implicit-def $cr0 - ; CHECK: ANDI8o %0, 0, implicit-def $cr0 + %3 = SLD_rec %0, killed %2, implicit-def $cr0 + ; CHECK: ANDI8_rec %0, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq @@ -1166,8 +1166,8 @@ body: | ... --- -name: testSRDo -# CHECK-ALL: name: testSRDo +name: testSRD_rec +# CHECK-ALL: name: testSRD_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -1211,8 +1211,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI 64 - %3 = SRDo %0, killed %2, implicit-def $cr0 - ; CHECK: ANDI8o %0, 0, implicit-def $cr0 + %3 = SRD_rec %0, killed %2, implicit-def $cr0 + ; CHECK: ANDI8_rec %0, 0, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 0 %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq @@ -1272,8 +1272,8 @@ body: | ... --- -name: testSRADo -# CHECK-ALL: name: testSRADo +name: testSRAD_rec +# CHECK-ALL: name: testSRAD_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -1317,8 +1317,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI 68 - %3 = SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0 - ; CHECK: SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0 + %3 = SRAD_rec %0, killed %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRAD_rec %0, killed %2, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: srad. 
3, 3, 5 %4 = COPY killed $cr0 %5 = ISEL8 %1, %3, %4.sub_eq diff --git a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir index 3504f1d229a1..df5b040b5dcd 100644 --- a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir +++ b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir @@ -38,7 +38,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testADDCo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testADDC_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %add = add nsw i64 %b, %a %cmp = icmp eq i64 %add, 0 @@ -62,7 +62,7 @@ } ; Function Attrs: norecurse nounwind readnone - define signext i32 @testANDo(i64 %a, i64 %b) local_unnamed_addr #0 { + define signext i32 @testAND_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %and = and i64 %b, %a %tobool = icmp eq i64 %and, 0 @@ -72,7 +72,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testAND8o(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testAND8_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %and = and i64 %b, %a %tobool = icmp eq i64 %and, 0 @@ -506,7 +506,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLDCLo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLDCL_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %and = and i64 %b, 63 %shl = shl i64 %a, %and @@ -530,7 +530,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLDCRo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLDCR_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %and = and i64 %b, 63 %shl = shl i64 %a, %and @@ -551,7 +551,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLDICLo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLDICL_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shr = lshr i64 %a, 11 %and = and i64 %shr, 16777215 @@ -561,7 +561,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLDICLo2(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLDICL_rec2(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shr = lshr i64 %a, 11 %and = and i64 %shr, 16777215 @@ -571,7 +571,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLDICLo3(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLDICL_rec3(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shr = lshr i64 %a, 11 %and = and i64 %shr, 16777215 @@ -613,7 +613,7 @@ } ; Function Attrs: norecurse nounwind readnone - define zeroext i32 @testRLWINMo(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { + define zeroext i32 @testRLWINM_rec(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { entry: %and = and i32 %a, 255 %tobool = icmp eq i32 %and, 0 @@ -622,7 +622,7 @@ } ; Function Attrs: norecurse nounwind readnone - define zeroext i32 @testRLWINMo2(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { + define zeroext i32 @testRLWINM_rec2(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { entry: %and = and i32 %a, 255 %tobool = icmp eq i32 %and, 0 @@ -631,7 +631,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testRLWINM8o(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testRLWINM8_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %a.tr = trunc i64 %a to i32 %0 = shl i32 %a.tr, 4 @@ -650,7 +650,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testSLDo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testSLD_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shl = shl i64 %a, %b %tobool = icmp 
eq i64 %shl, 0 @@ -666,7 +666,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testSRDo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testSRD_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shr = lshr i64 %a, %b %tobool = icmp eq i64 %shr, 0 @@ -682,7 +682,7 @@ } ; Function Attrs: norecurse nounwind readnone - define zeroext i32 @testSLWo(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { + define zeroext i32 @testSLW_rec(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { entry: %shl = shl i32 %a, %b %tobool = icmp eq i32 %shl, 0 @@ -698,7 +698,7 @@ } ; Function Attrs: norecurse nounwind readnone - define zeroext i32 @testSRWo(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { + define zeroext i32 @testSRW_rec(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 { entry: %shr = lshr i32 %a, %b %tobool = icmp eq i32 %shr, 0 @@ -714,7 +714,7 @@ } ; Function Attrs: norecurse nounwind readnone - define signext i32 @testSRAWo(i32 signext %a, i32 signext %b) local_unnamed_addr #0 { + define signext i32 @testSRAW_rec(i32 signext %a, i32 signext %b) local_unnamed_addr #0 { entry: %shr = ashr i32 %a, %b %tobool = icmp eq i32 %shr, 0 @@ -730,7 +730,7 @@ } ; Function Attrs: norecurse nounwind readnone - define i64 @testSRADo(i64 %a, i64 %b) local_unnamed_addr #0 { + define i64 @testSRAD_rec(i64 %a, i64 %b) local_unnamed_addr #0 { entry: %shr = ashr i64 %a, %b %tobool = icmp eq i64 %shr, 0 @@ -1236,8 +1236,8 @@ body: | ... --- -name: testADDCo -# CHECK-ALL: name: testADDCo +name: testADDC_rec +# CHECK-ALL: name: testADDC_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -1284,8 +1284,8 @@ body: | %1 = LI 433 %0 = COPY $x3 %2 = COPY %0.sub_32 - %3 = ADDCo %1, %2, implicit-def $cr0, implicit-def $carry - ; CHECK: ADDICo %2, 433, implicit-def $cr0, implicit-def $carry + %3 = ADDC_rec %1, %2, implicit-def $cr0, implicit-def $carry + ; CHECK: ADDIC_rec %2, 433, implicit-def $cr0, implicit-def $carry ; CHECK-LATE: addic. 3, 3, 433 %4 = COPY killed $cr0 %5 = COPY %4.sub_eq @@ -1397,8 +1397,8 @@ body: | ... --- -name: testANDo -# CHECK-ALL: name: testANDo +name: testAND_rec +# CHECK-ALL: name: testAND_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -1443,8 +1443,8 @@ body: | %1 = LI 78 %0 = COPY $x3 %2 = COPY %0.sub_32 - %3 = ANDo %1, %2, implicit-def $cr0 - ; CHECK: ANDIo %2, 78, implicit-def $cr0 + %3 = AND_rec %1, %2, implicit-def $cr0 + ; CHECK: ANDI_rec %2, 78, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 78 %4 = COPY killed $cr0 %5 = ISEL %2, %1, %4.sub_eq @@ -1454,8 +1454,8 @@ body: | ... --- -name: testAND8o -# CHECK-ALL: name: testAND8o +name: testAND8_rec +# CHECK-ALL: name: testAND8_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -1497,8 +1497,8 @@ body: | %1 = LI8 321 %0 = COPY $x3 - %2 = AND8o %1, %0, implicit-def $cr0 - ; CHECK: ANDI8o %0, 321, implicit-def $cr0 + %2 = AND8_rec %1, %0, implicit-def $cr0 + ; CHECK: ANDI8_rec %0, 321, implicit-def $cr0 ; CHECK-LATE: andi. 5, 3, 321 %3 = COPY killed $cr0 %4 = ISEL8 %1, %0, %3.sub_eq @@ -3671,8 +3671,8 @@ body: | ... 
--- -name: testRLDCLo -# CHECK-ALL: name: testRLDCLo +name: testRLDCL_rec +# CHECK-ALL: name: testRLDCL_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -3718,8 +3718,8 @@ body: | %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI 37 - %4 = RLDCLo %0, killed %3, 0, implicit-def $cr0 - ; CHECK: RLDICLo %0, 37, 0, implicit-def $cr0 + %4 = RLDCL_rec %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICL_rec %0, 37, 0, implicit-def $cr0 ; CHECK-LATE: rldicl. 5, 3, 37, 0 %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq @@ -3781,8 +3781,8 @@ body: | ... --- -name: testRLDCRo -# CHECK-ALL: name: testRLDCRo +name: testRLDCR_rec +# CHECK-ALL: name: testRLDCR_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -3828,8 +3828,8 @@ body: | %0 = COPY $x3 %2 = RLDICL %1, 0, 58 %3 = LI 18 - %4 = RLDCRo %0, killed %3, 0, implicit-def $cr0 - ; CHECK: RLDICRo %0, 18, 0, implicit-def $cr0 + %4 = RLDCR_rec %0, killed %3, 0, implicit-def $cr0 + ; CHECK: RLDICR_rec %0, 18, 0, implicit-def $cr0 ; CHECK-LATE: rldicr. 5, 3, 18, 0 %5 = COPY killed $cr0 %6 = ISEL8 %2, %0, %5.sub_eq @@ -3884,8 +3884,8 @@ body: | ... --- -name: testRLDICLo -# CHECK-ALL: name: testRLDICLo +name: testRLDICL_rec +# CHECK-ALL: name: testRLDICL_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -3927,8 +3927,8 @@ body: | %1 = COPY $x4 %0 = LI8 -1 - %2 = RLDICLo %0, 53, 48, implicit-def $cr0 - ; CHECK: ANDI8o %0, 65535 + %2 = RLDICL_rec %0, 53, 48, implicit-def $cr0 + ; CHECK: ANDI8_rec %0, 65535 ; CHECK-LATE: li 3, -1 ; CHECK-LATE: andi. 3, 3, 65535 %3 = COPY killed $cr0 @@ -3938,8 +3938,8 @@ body: | ... --- -name: testRLDICLo2 -# CHECK-ALL: name: testRLDICLo2 +name: testRLDICL_rec2 +# CHECK-ALL: name: testRLDICL_rec2 alignment: 16 exposesReturnsTwice: false legalized: false @@ -3981,9 +3981,9 @@ body: | %1 = COPY $x4 %0 = LI8 200 - %2 = RLDICLo %0, 61, 3, implicit-def $cr0 + %2 = RLDICL_rec %0, 61, 3, implicit-def $cr0 ; CHECK: LI8 25 - ; CHECK: ANDI8o %0, 25 + ; CHECK: ANDI8_rec %0, 25 ; CHECK-LATE-NOT: andi. %3 = COPY killed $cr0 %4 = ISEL8 %1, %2, %3.sub_eq @@ -3992,8 +3992,8 @@ body: | ... --- -name: testRLDICLo3 -# CHECK-ALL: name: testRLDICLo3 +name: testRLDICL_rec3 +# CHECK-ALL: name: testRLDICL_rec3 alignment: 16 exposesReturnsTwice: false legalized: false @@ -4035,8 +4035,8 @@ body: | %1 = COPY $x4 %0 = LI8 2 - %2 = RLDICLo %0, 32, 32, implicit-def $cr0 - ; CHECK: ANDI8o %0, 0 + %2 = RLDICL_rec %0, 32, 32, implicit-def $cr0 + ; CHECK: ANDI8_rec %0, 0 ; CHECK-LATE: li 3, 2 ; CHECK-LATE: andi. 3, 3, 0 %3 = COPY killed $cr0 @@ -4248,8 +4248,8 @@ body: | ... --- -name: testRLWINMo -# CHECK-ALL: name: testRLWINMo +name: testRLWINM_rec +# CHECK-ALL: name: testRLWINM_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -4298,9 +4298,9 @@ body: | %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -22 - %4 = RLWINMo %3, 0, 24, 31, implicit-def $cr0 + %4 = RLWINM_rec %3, 0, 24, 31, implicit-def $cr0 ; CHECK: LI -22 - ; CHECK: ANDIo %3, 65514 + ; CHECK: ANDI_rec %3, 65514 ; CHECK-LATE: li 3, -22 ; CHECK-LATE: andi. 5, 3, 234 %5 = COPY killed $cr0 @@ -4313,8 +4313,8 @@ body: | ... 
--- -name: testRLWINMo2 -# CHECK-ALL: name: testRLWINMo2 +name: testRLWINM_rec2 +# CHECK-ALL: name: testRLWINM_rec2 alignment: 16 exposesReturnsTwice: false legalized: false @@ -4363,9 +4363,9 @@ body: | %0 = COPY $x3 %2 = COPY %1.sub_32 %3 = LI -22 - %4 = RLWINMo %3, 5, 24, 31, implicit-def $cr0 + %4 = RLWINM_rec %3, 5, 24, 31, implicit-def $cr0 ; CHECK: LI -22 - ; CHECK-NOT: ANDI8o %3, 65514 + ; CHECK-NOT: ANDI8_rec %3, 65514 ; CHECK-LATE-NOT: andi. %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq @@ -4377,8 +4377,8 @@ body: | ... --- -name: testRLWINM8o -# CHECK-ALL: name: testRLWINM8o +name: testRLWINM8_rec +# CHECK-ALL: name: testRLWINM8_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -4425,8 +4425,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI8 -18 - %3 = RLWINM8o %2, 4, 20, 27, implicit-def $cr0 - ; CHECK: ANDI8o %2, 3808 + %3 = RLWINM8_rec %2, 4, 20, 27, implicit-def $cr0 + ; CHECK: ANDI8_rec %2, 3808 ; CHECK-LATE: li 3, -18 ; CHECK-LATE: andi. 3, 3, 3808 %7 = COPY killed $cr0 @@ -4488,8 +4488,8 @@ body: | ... --- -name: testSLDo -# CHECK-ALL: name: testSLDo +name: testSLD_rec +# CHECK-ALL: name: testSLD_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -4533,8 +4533,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI 17 - %3 = SLDo %0, killed %2, implicit-def $cr0 - ; CHECK: RLDICRo %0, 17, 46, implicit-def $cr0 + %3 = SLD_rec %0, killed %2, implicit-def $cr0 + ; CHECK: RLDICR_rec %0, 17, 46, implicit-def $cr0 ; CHECK-LATE: rldicr. 5, 3, 17, 46 %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq @@ -4594,8 +4594,8 @@ body: | ... --- -name: testSRDo -# CHECK-ALL: name: testSRDo +name: testSRD_rec +# CHECK-ALL: name: testSRD_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -4639,8 +4639,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI 17 - %3 = SRDo %0, killed %2, implicit-def $cr0 - ; CHECK: RLDICLo %0, 47, 17, implicit-def $cr0 + %3 = SRD_rec %0, killed %2, implicit-def $cr0 + ; CHECK: RLDICL_rec %0, 47, 17, implicit-def $cr0 ; CHECK-LATE: rldicl. 5, 3, 47, 17 %4 = COPY killed $cr0 %5 = ISEL8 %1, %0, %4.sub_eq @@ -4706,8 +4706,8 @@ body: | ... --- -name: testSLWo -# CHECK-ALL: name: testSLWo +name: testSLW_rec +# CHECK-ALL: name: testSLW_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -4756,8 +4756,8 @@ body: | %0 = COPY $x3 %2 = LI 11 %3 = COPY %0.sub_32 - %4 = SLWo %3, %2, implicit-def $cr0 - ; CHECK: RLWINMo %3, 11, 0, 20, implicit-def $cr0 + %4 = SLW_rec %3, %2, implicit-def $cr0 + ; CHECK: RLWINM_rec %3, 11, 0, 20, implicit-def $cr0 ; CHECK-LATE: rlwinm. 5, 3, 11, 0, 20 %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq @@ -4826,8 +4826,8 @@ body: | ... --- -name: testSRWo -# CHECK-ALL: name: testSRWo +name: testSRW_rec +# CHECK-ALL: name: testSRW_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -4876,8 +4876,8 @@ body: | %0 = COPY $x3 %2 = LI 7 %3 = COPY %0.sub_32 - %4 = SRWo %3, %2, implicit-def $cr0 - ; CHECK: RLWINMo %3, 25, 7, 31 + %4 = SRW_rec %3, %2, implicit-def $cr0 + ; CHECK: RLWINM_rec %3, 25, 7, 31 ; CHECK-LATE: rlwinm. 5, 3, 25, 7, 31 %5 = COPY killed $cr0 %6 = ISEL %2, %3, %5.sub_eq @@ -4944,8 +4944,8 @@ body: | ... 
--- -name: testSRAWo -# CHECK-ALL: name: testSRAWo +name: testSRAW_rec +# CHECK-ALL: name: testSRAW_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -4992,8 +4992,8 @@ body: | %0 = COPY $x3 %2 = LI 8 %3 = COPY %0.sub_32 - %4 = SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0 - ; CHECK: SRAWIo killed %3, 8, implicit-def dead $carry, implicit-def $cr0 + %4 = SRAW_rec killed %3, %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRAWI_rec killed %3, 8, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: srawi. 3, 3, 8 %5 = COPY killed $cr0 %6 = ISEL %2, %4, %5.sub_eq @@ -5054,8 +5054,8 @@ body: | ... --- -name: testSRADo -# CHECK-ALL: name: testSRADo +name: testSRAD_rec +# CHECK-ALL: name: testSRAD_rec alignment: 16 exposesReturnsTwice: false legalized: false @@ -5099,8 +5099,8 @@ body: | %1 = COPY $x4 %0 = COPY $x3 %2 = LI 61 - %3 = SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0 - ; CHECK: SRADIo %0, 61, implicit-def dead $carry, implicit-def $cr0 + %3 = SRAD_rec %0, killed %2, implicit-def dead $carry, implicit-def $cr0 + ; CHECK: SRADI_rec %0, 61, implicit-def dead $carry, implicit-def $cr0 ; CHECK-LATE: sradi. 3, 3, 61 %4 = COPY killed $cr0 %5 = ISEL8 %1, %3, %4.sub_eq diff --git a/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir b/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir index bc7c461d3197..f2e576ed73b6 100644 --- a/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir +++ b/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir @@ -124,8 +124,8 @@ body: | BLR8 implicit $lr8, implicit $rm ... --- -name: testFoldRLWINMoToZero -#CHECK : name : testFoldRLWINMoToZero +name: testFoldRLWINM_recToZero +#CHECK : name : testFoldRLWINM_recToZero tracksRegLiveness: true body: | bb.0.entry: @@ -134,8 +134,8 @@ body: | %1:gprc = COPY %0.sub_32:g8rc %2:gprc = RLWINM %1:gprc, 27, 5, 10 ; CHECK: %2:gprc = RLWINM %1, 27, 5, 10 - %3:gprc = RLWINMo %2:gprc, 8, 5, 10, implicit-def $cr0 - ; CHECK: %3:gprc = ANDIo %2, 0, implicit-def $cr0 + %3:gprc = RLWINM_rec %2:gprc, 8, 5, 10, implicit-def $cr0 + ; CHECK: %3:gprc = ANDI_rec %2, 0, implicit-def $cr0 BLR8 implicit $lr8, implicit $rm ... 
--- diff --git a/llvm/test/CodeGen/PowerPC/ifcvt-diamond-ret.mir b/llvm/test/CodeGen/PowerPC/ifcvt-diamond-ret.mir index 5f52bde1edec..744c61c6480d 100644 --- a/llvm/test/CodeGen/PowerPC/ifcvt-diamond-ret.mir +++ b/llvm/test/CodeGen/PowerPC/ifcvt-diamond-ret.mir @@ -6,7 +6,7 @@ body: | liveins: $x0, $x3 successors: %bb.1(0x40000000), %bb.2(0x40000000) - dead renamable $x3 = ANDI8o killed renamable $x3, 1, implicit-def dead $cr0, implicit-def $cr0gt + dead renamable $x3 = ANDI8_rec killed renamable $x3, 1, implicit-def dead $cr0, implicit-def $cr0gt $cr2lt = CROR $cr0gt, $cr0gt BCn killed renamable $cr2lt, %bb.2 B %bb.1 @@ -26,7 +26,7 @@ body: | # CHECK: body: | # CHECK: bb.0: -# CHECK: dead renamable $x3 = ANDI8o killed renamable $x3, 1, implicit-def dead $cr0, implicit-def $cr0gt +# CHECK: dead renamable $x3 = ANDI8_rec killed renamable $x3, 1, implicit-def dead $cr0, implicit-def $cr0gt # CHECK: $cr2lt = CROR $cr0gt, $cr0gt # CHECK: renamable $x3 = LIS8 4096 # CHECK: MTLR8 $x0, implicit-def $lr8 diff --git a/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll b/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll index 4a3cd710332f..406a3035a978 100644 --- a/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll +++ b/llvm/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll @@ -11,7 +11,7 @@ define signext i32 @fn1(i32 %baz) { ; CHECK: CMPLDI ; CHECK: BCC -; CHECK: ANDI8o {{[^,]+}}, 65520, implicit-def $cr0 +; CHECK: ANDI8_rec {{[^,]+}}, 65520, implicit-def $cr0 ; CHECK: COPY killed $cr0 ; CHECK: BCC %5 = icmp eq i64 %4, 0 @@ -26,7 +26,7 @@ bar: ; CHECK-LABEL: fn2 define signext i32 @fn2(i64 %a, i64 %b) { -; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def $cr0 +; CHECK: OR8_rec {{[^, ]+}}, {{[^, ]+}}, implicit-def $cr0 ; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed $cr ; CHECK: BCC 12, killed [[CREG]] %1 = or i64 %b, %a @@ -42,7 +42,7 @@ bar: ; CHECK-LABEL: fn3 define signext i32 @fn3(i32 %a) { -; CHECK: ANDIo killed {{[%0-9]+}}{{[^,]*}}, 10, implicit-def $cr0 +; CHECK: ANDI_rec killed {{[%0-9]+}}{{[^,]*}}, 10, implicit-def $cr0 ; CHECK: [[CREG:[^, ]+]]:crrc = COPY $cr0 ; CHECK: BCC 76, killed [[CREG]] %1 = and i32 %a, 10 @@ -61,7 +61,7 @@ bar: ; CHECK-LABEL: fn4 define i64 @fn4(i64 %a, i64 %b) { -; CHECK: ADD8o +; CHECK: ADD8_rec ; CHECK-NOT: CMP ; CHECK: BCC 71 @@ -81,11 +81,11 @@ if.end: declare void @exit(i32 signext) ; Since %v1 and %v2 are zero-extended 32-bit values, %1 is also zero-extended. -; In this case, we want to use ORo instead of OR + CMPLWI. +; In this case, we want to use OR_rec instead of OR + CMPLWI. 
; CHECK-LABEL: fn5 define zeroext i32 @fn5(i32* %p1, i32* %p2) { -; CHECK: ORo +; CHECK: OR_rec ; CHECK-NOT: CMP ; CHECK: BCC %v1 = load i32, i32* %p1 @@ -107,11 +107,11 @@ bar: ; CHECK-LABEL: fn6 define i8* @fn6(i8* readonly %p) { ; CHECK: LBZU -; CHECK: EXTSBo +; CHECK: EXTSB_rec ; CHECK-NOT: CMP ; CHECK: BCC ; CHECK: LBZU -; CHECK: EXTSBo +; CHECK: EXTSB_rec ; CHECK-NOT: CMP ; CHECK: BCC diff --git a/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir b/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir index 3d6d21565e9d..2a3123a636ed 100644 --- a/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir +++ b/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir @@ -114,7 +114,7 @@ body: | %24 = CNTLZD killed %20 %25 = CMPLDI %15, 0 BCC 76, %25, %bb.2.loop - ; CHECK: SUBFC8o %3, %1, implicit-def $carry, implicit-def $cr0 + ; CHECK: SUBFC8_rec %3, %1, implicit-def $carry, implicit-def $cr0 ; CHECK: COPY killed $cr0 ; CHECK: BCC diff --git a/llvm/test/CodeGen/PowerPC/peephole-miscompile-extswsli.mir b/llvm/test/CodeGen/PowerPC/peephole-miscompile-extswsli.mir index 54bc7cdd05fc..86dcda229f08 100644 --- a/llvm/test/CodeGen/PowerPC/peephole-miscompile-extswsli.mir +++ b/llvm/test/CodeGen/PowerPC/peephole-miscompile-extswsli.mir @@ -14,14 +14,14 @@ body: | ; CHECK: [[COPY1:%[0-9]+]]:g8rc = COPY $x5 ; CHECK: [[COPY2:%[0-9]+]]:g8rc = COPY $x4 ; CHECK: [[COPY3:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY $x3 - ; CHECK: [[ANDI8o_:%[0-9]+]]:g8rc = ANDI8o [[COPY1]], 1, implicit-def $cr0 + ; CHECK: [[ANDI8_rec_:%[0-9]+]]:g8rc = ANDI8_rec [[COPY1]], 1, implicit-def $cr0 ; CHECK: [[COPY4:%[0-9]+]]:crbitrc = COPY $cr0gt ; CHECK: BCn killed [[COPY4]], %bb.2 ; CHECK: B %bb.1 ; CHECK: bb.1: ; CHECK: liveins: $x3 ; CHECK: [[EXTSW:%[0-9]+]]:g8rc = EXTSW $x3 - ; CHECK: [[RLDICR:%[0-9]+]]:g8rc = RLDICR [[ANDI8o_]], 2, 61 + ; CHECK: [[RLDICR:%[0-9]+]]:g8rc = RLDICR [[ANDI8_rec_]], 2, 61 ; CHECK: $x3 = COPY [[RLDICR]] ; CHECK: [[RLDICR1:%[0-9]+]]:g8rc = RLDICR [[EXTSW]], 2, 61 ; CHECK: [[ADD8_:%[0-9]+]]:g8rc = ADD8 [[COPY3]], [[RLDICR1]] @@ -41,7 +41,7 @@ body: | %3:g8rc = COPY $x5 %2:g8rc = COPY $x4 %1:g8rc_and_g8rc_nox0 = COPY $x3 - %11:g8rc = ANDI8o %3, 1, implicit-def $cr0 + %11:g8rc = ANDI8_rec %3, 1, implicit-def $cr0 %6:crbitrc = COPY $cr0gt BCn killed %6, %bb.2 B %bb.1 diff --git a/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir b/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir index 4ca8607964a6..e3aeb5605b42 100644 --- a/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir +++ b/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir @@ -120,9 +120,9 @@ body: | %0:g8rc = COPY $x3 %2:gprc_and_gprc_nor0 = COPY %1.sub_32 %3:gprc = LI -11 - %4:gprc_and_gprc_nor0 = RLWINMo %3, 2, 20, 31, implicit-def $cr0 + %4:gprc_and_gprc_nor0 = RLWINM_rec %3, 2, 20, 31, implicit-def $cr0 ; CHECK: LI 4055 - ; CHECK: ANDIo %3, 4055 + ; CHECK: ANDI_rec %3, 4055 ; CHECK-LATE-NOT: andi. ; CHECK-LATE: rlwinm. %5:crrc = COPY killed $cr0 @@ -180,9 +180,9 @@ body: | %0:g8rc = COPY $x3 %2:gprc_and_gprc_nor0 = COPY %1.sub_32 %3:gprc_and_gprc_nor0 = LI 1 - %4:gprc = RLWINMo %3, 21, 20, 31, implicit-def $cr0 + %4:gprc = RLWINM_rec %3, 21, 20, 31, implicit-def $cr0 ; CHECK: LI 1 - ; CHECK: ANDIo %3, 0 + ; CHECK: ANDI_rec %3, 0 ; CHECK-LATE: li [[IMM:[0-9]+]], 1 ; CHECK-LATE: andi. 
{{[0-9]+}}, [[IMM]], 0 %5:crrc = COPY killed $cr0 @@ -240,9 +240,9 @@ body: | %0:g8rc = COPY $x3 %2:gprc_and_gprc_nor0 = COPY %1.sub_32 %3:gprc_and_gprc_nor0 = LI -11 - %4:gprc = RLWINMo %3, 2, 20, 31, implicit-def $cr0 + %4:gprc = RLWINM_rec %3, 2, 20, 31, implicit-def $cr0 ; CHECK: LI -11 - ; CHECK: ANDIo %3, 65525 + ; CHECK: ANDI_rec %3, 65525 ; CHECK-LATE-NOT: andi. ; CHECK-LATE: rlwinm. %5:crrc = COPY killed $cr0 @@ -295,9 +295,9 @@ body: | %1:g8rc_and_g8rc_nox0 = COPY $x4 %0:g8rc = LI8 -11 - %2:g8rc_and_g8rc_nox0 = RLDICLo %0, 2, 49, implicit-def $cr0 + %2:g8rc_and_g8rc_nox0 = RLDICL_rec %0, 2, 49, implicit-def $cr0 ; CHECK: LI8 32727 - ; CHECK: ANDI8o %0, 32727 + ; CHECK: ANDI8_rec %0, 32727 ; CHECK-LATE-NOT: andi. ; CHECK-LATE: rldicl. %3:crrc = COPY killed $cr0 @@ -349,9 +349,9 @@ body: | %1:g8rc_and_g8rc_nox0 = COPY $x4 %0:g8rc_and_g8rc_nox0 = LI8 1 - %2:g8rc = RLDICLo %0, 32, 33, implicit-def $cr0 + %2:g8rc = RLDICL_rec %0, 32, 33, implicit-def $cr0 ; CHECK: LI8 1 - ; CHECK: ANDI8o %0, 0 + ; CHECK: ANDI8_rec %0, 0 ; CHECK-LATE: li [[IMM:[0-9]+]], 1 ; CHECK-LATE: andi. {{[0-9]+}}, [[IMM]], 0 %3:crrc = COPY killed $cr0 @@ -403,9 +403,9 @@ body: | %1:g8rc_and_g8rc_nox0 = COPY $x4 %0:g8rc_and_g8rc_nox0 = LI8 -11 - %2:g8rc = RLDICLo %0, 2, 49, implicit-def $cr0 + %2:g8rc = RLDICL_rec %0, 2, 49, implicit-def $cr0 ; CHECK: LI8 -11 - ; CHECK: ANDI8o %0, 65525 + ; CHECK: ANDI8_rec %0, 65525 ; CHECK-LATE-NOT: andi. ; CHECK-LATE: rldicl. %3:crrc = COPY killed $cr0