2005-10-16 13:39:50 +08:00
|
|
|
//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
|
2005-08-17 01:14:42 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2005-08-17 01:14:42 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file defines the interfaces that PPC uses to lower LLVM code into a
|
|
|
|
// selection DAG.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-08-14 00:26:38 +08:00
|
|
|
#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
|
|
|
|
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
|
2005-08-17 01:14:42 +08:00
|
|
|
|
2013-04-06 07:29:01 +08:00
|
|
|
#include "PPCInstrInfo.h"
|
2013-06-13 00:39:22 +08:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
2017-01-13 08:58:58 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2014-01-07 19:48:04 +08:00
|
|
|
#include "llvm/CodeGen/SelectionDAG.h"
|
2017-01-13 08:58:58 +08:00
|
|
|
#include "llvm/CodeGen/SelectionDAGNodes.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetLowering.h"
|
2018-03-30 01:21:10 +08:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2017-01-13 08:58:58 +08:00
|
|
|
#include "llvm/IR/Attributes.h"
|
|
|
|
#include "llvm/IR/CallingConv.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/InlineAsm.h"
|
|
|
|
#include "llvm/IR/Metadata.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
2018-03-24 07:58:25 +08:00
|
|
|
#include "llvm/Support/MachineValueType.h"
|
2017-01-13 08:58:58 +08:00
|
|
|
#include <utility>
|
2005-08-17 01:14:42 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
2017-01-13 08:58:58 +08:00
|
|
|
|
2005-08-27 04:25:03 +08:00
|
|
|
namespace PPCISD {
|
2017-01-13 08:58:58 +08:00
|
|
|
|
2017-07-26 21:44:59 +08:00
|
|
|
// When adding a NEW PPCISD node please add it to the correct position in
|
|
|
|
// the enum. The order of elements in this enum matters!
|
|
|
|
// Values that are added after this entry:
|
|
|
|
// STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE
|
2019-04-09 16:40:02 +08:00
|
|
|
// are considered memory opcodes and are treated differently than entries
|
2017-07-26 21:44:59 +08:00
|
|
|
// that come before it. For example, ADD or MUL should be placed before
|
|
|
|
// the ISD::FIRST_TARGET_MEMORY_OPCODE while a LOAD or STORE should come
|
|
|
|
// after it.
|
2020-01-07 03:05:12 +08:00
|
|
|
enum NodeType : unsigned {
|
|
|
|
// Start the numbering where the builtin ops and target ops leave off.
|
|
|
|
FIRST_NUMBER = ISD::BUILTIN_OP_END,
|
|
|
|
|
|
|
|
/// FSEL - Traditional three-operand fsel node.
|
|
|
|
///
|
|
|
|
FSEL,
|
|
|
|
|
|
|
|
/// XSMAXCDP, XSMINCDP - C-type min/max instructions.
|
|
|
|
XSMAXCDP,
|
|
|
|
XSMINCDP,
|
|
|
|
|
|
|
|
/// FCFID - The FCFID instruction, taking an f64 operand and producing
|
|
|
|
/// and f64 value containing the FP representation of the integer that
|
|
|
|
/// was temporarily in the f64 operand.
|
|
|
|
FCFID,
|
|
|
|
|
|
|
|
/// Newer FCFID[US] integer-to-floating-point conversion instructions for
|
|
|
|
/// unsigned integers and single-precision outputs.
|
|
|
|
FCFIDU,
|
|
|
|
FCFIDS,
|
|
|
|
FCFIDUS,
|
|
|
|
|
|
|
|
/// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
|
|
|
|
/// operand, producing an f64 value containing the integer representation
|
|
|
|
/// of that FP value.
|
|
|
|
FCTIDZ,
|
|
|
|
FCTIWZ,
|
|
|
|
|
|
|
|
/// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
|
|
|
|
/// unsigned integers with round toward zero.
|
|
|
|
FCTIDUZ,
|
|
|
|
FCTIWUZ,
|
|
|
|
|
|
|
|
/// Floating-point-to-interger conversion instructions
|
|
|
|
FP_TO_UINT_IN_VSR,
|
|
|
|
FP_TO_SINT_IN_VSR,
|
|
|
|
|
|
|
|
/// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
|
|
|
|
/// VSFRC that is sign-extended from ByteWidth to a 64-byte integer.
|
|
|
|
VEXTS,
|
|
|
|
|
|
|
|
/// Reciprocal estimate instructions (unary FP ops).
|
|
|
|
FRE,
|
|
|
|
FRSQRTE,
|
|
|
|
|
|
|
|
/// VPERM - The PPC VPERM Instruction.
|
|
|
|
///
|
|
|
|
VPERM,
|
|
|
|
|
|
|
|
/// XXSPLT - The PPC VSX splat instructions
|
|
|
|
///
|
|
|
|
XXSPLT,
|
|
|
|
|
|
|
|
/// VECINSERT - The PPC vector insert instruction
|
|
|
|
///
|
|
|
|
VECINSERT,
|
|
|
|
|
|
|
|
/// VECSHL - The PPC vector shift left instruction
|
|
|
|
///
|
|
|
|
VECSHL,
|
|
|
|
|
|
|
|
/// XXPERMDI - The PPC XXPERMDI instruction
|
|
|
|
///
|
|
|
|
XXPERMDI,
|
|
|
|
|
|
|
|
/// The CMPB instruction (takes two operands of i32 or i64).
|
|
|
|
CMPB,
|
|
|
|
|
|
|
|
/// Hi/Lo - These represent the high and low 16-bit parts of a global
|
|
|
|
/// address respectively. These nodes have two operands, the first of
|
|
|
|
/// which must be a TargetGlobalAddress, and the second of which must be a
|
|
|
|
/// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
|
|
|
|
/// though these are usually folded into other nodes.
|
|
|
|
Hi,
|
|
|
|
Lo,
|
|
|
|
|
|
|
|
/// The following two target-specific nodes are used for calls through
|
|
|
|
/// function pointers in the 64-bit SVR4 ABI.
|
|
|
|
|
|
|
|
/// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
|
|
|
|
/// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
|
|
|
|
/// compute an allocation on the stack.
|
|
|
|
DYNALLOC,
|
|
|
|
|
|
|
|
/// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
|
|
|
|
/// compute an offset from native SP to the address of the most recent
|
|
|
|
/// dynamic alloca.
|
|
|
|
DYNAREAOFFSET,
|
|
|
|
|
|
|
|
/// GlobalBaseReg - On Darwin, this node represents the result of the mflr
|
|
|
|
/// at function entry, used for PIC code.
|
|
|
|
GlobalBaseReg,
|
|
|
|
|
|
|
|
/// These nodes represent PPC shifts.
|
|
|
|
///
|
|
|
|
/// For scalar types, only the last `n + 1` bits of the shift amounts
|
|
|
|
/// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
|
|
|
|
/// for exact behaviors.
|
|
|
|
///
|
|
|
|
/// For vector types, only the last n bits are used. See vsld.
|
|
|
|
SRL,
|
|
|
|
SRA,
|
|
|
|
SHL,
|
|
|
|
|
2020-06-04 16:30:17 +08:00
|
|
|
/// FNMSUB - Negated multiply-subtract instruction.
|
|
|
|
FNMSUB,
|
|
|
|
|
2020-01-07 03:05:12 +08:00
|
|
|
/// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
|
|
|
|
/// word and shift left immediate.
|
|
|
|
EXTSWSLI,
|
|
|
|
|
|
|
|
/// The combination of sra[wd]i and addze used to implemented signed
|
|
|
|
/// integer division by a power of 2. The first operand is the dividend,
|
|
|
|
/// and the second is the constant shift amount (representing the
|
|
|
|
/// divisor).
|
|
|
|
SRA_ADDZE,
|
|
|
|
|
|
|
|
/// CALL - A direct function call.
|
|
|
|
/// CALL_NOP is a call with the special NOP which follows 64-bit
|
2020-04-08 21:07:35 +08:00
|
|
|
/// CALL_NOTOC the caller does not use the TOC.
|
2020-01-07 03:05:12 +08:00
|
|
|
/// SVR4 calls and 32-bit/64-bit AIX calls.
|
|
|
|
CALL,
|
|
|
|
CALL_NOP,
|
2020-04-08 21:07:35 +08:00
|
|
|
CALL_NOTOC,
|
2020-01-07 03:05:12 +08:00
|
|
|
|
|
|
|
/// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
|
|
|
|
/// MTCTR instruction.
|
|
|
|
MTCTR,
|
|
|
|
|
|
|
|
/// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
|
|
|
|
/// BCTRL instruction.
|
|
|
|
BCTRL,
|
|
|
|
|
|
|
|
/// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
|
|
|
|
/// instruction and the TOC reload required on 64-bit ELF, 32-bit AIX
|
|
|
|
/// and 64-bit AIX.
|
|
|
|
BCTRL_LOAD_TOC,
|
|
|
|
|
|
|
|
/// Return with a flag operand, matched by 'blr'
|
|
|
|
RET_FLAG,
|
|
|
|
|
|
|
|
/// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
|
|
|
|
/// This copies the bits corresponding to the specified CRREG into the
|
|
|
|
/// resultant GPR. Bits corresponding to other CR regs are undefined.
|
|
|
|
MFOCRF,
|
|
|
|
|
|
|
|
/// Direct move from a VSX register to a GPR
|
|
|
|
MFVSR,
|
|
|
|
|
|
|
|
/// Direct move from a GPR to a VSX register (algebraic)
|
|
|
|
MTVSRA,
|
|
|
|
|
|
|
|
/// Direct move from a GPR to a VSX register (zero)
|
|
|
|
MTVSRZ,
|
|
|
|
|
|
|
|
/// Direct move of 2 consecutive GPR to a VSX register.
|
|
|
|
BUILD_FP128,
|
|
|
|
|
|
|
|
/// BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and
|
|
|
|
/// EXTRACT_ELEMENT but take f64 arguments instead of i64, as i64 is
|
|
|
|
/// unsupported for this target.
|
|
|
|
/// Merge 2 GPRs to a single SPE register.
|
|
|
|
BUILD_SPE64,
|
|
|
|
|
|
|
|
/// Extract SPE register component, second argument is high or low.
|
|
|
|
EXTRACT_SPE,
|
|
|
|
|
|
|
|
/// Extract a subvector from signed integer vector and convert to FP.
|
|
|
|
/// It is primarily used to convert a (widened) illegal integer vector
|
|
|
|
/// type to a legal floating point vector type.
|
|
|
|
/// For example v2i32 -> widened to v4i32 -> v2f64
|
|
|
|
SINT_VEC_TO_FP,
|
|
|
|
|
|
|
|
/// Extract a subvector from unsigned integer vector and convert to FP.
|
|
|
|
/// As with SINT_VEC_TO_FP, used for converting illegal types.
|
|
|
|
UINT_VEC_TO_FP,
|
|
|
|
|
|
|
|
// FIXME: Remove these once the ANDI glue bug is fixed:
|
|
|
|
/// i1 = ANDI_rec_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
|
|
|
|
/// eq or gt bit of CR0 after executing andi. x, 1. This is used to
|
|
|
|
/// implement truncation of i32 or i64 to i1.
|
|
|
|
ANDI_rec_1_EQ_BIT,
|
|
|
|
ANDI_rec_1_GT_BIT,
|
|
|
|
|
|
|
|
// READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
|
|
|
|
// target (returns (Lo, Hi)). It takes a chain operand.
|
|
|
|
READ_TIME_BASE,
|
|
|
|
|
|
|
|
// EH_SJLJ_SETJMP - SjLj exception handling setjmp.
|
|
|
|
EH_SJLJ_SETJMP,
|
|
|
|
|
|
|
|
// EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
|
|
|
|
EH_SJLJ_LONGJMP,
|
|
|
|
|
|
|
|
/// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
|
|
|
|
/// instructions. For lack of better number, we use the opcode number
|
|
|
|
/// encoding for the OPC field to identify the compare. For example, 838
|
|
|
|
/// is VCMPGTSH.
|
|
|
|
VCMP,
|
|
|
|
|
|
|
|
/// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
|
|
|
|
/// altivec VCMP*o instructions. For lack of better number, we use the
|
|
|
|
/// opcode number encoding for the OPC field to identify the compare. For
|
|
|
|
/// example, 838 is VCMPGTSH.
|
|
|
|
VCMPo,
|
|
|
|
|
|
|
|
/// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
|
|
|
|
/// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
|
|
|
|
/// condition register to branch on, OPC is the branch opcode to use (e.g.
|
|
|
|
/// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
|
|
|
|
/// an optional input flag argument.
|
|
|
|
COND_BRANCH,
|
|
|
|
|
|
|
|
/// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
|
|
|
|
/// loops.
|
|
|
|
BDNZ,
|
|
|
|
BDZ,
|
|
|
|
|
|
|
|
/// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
|
|
|
|
/// towards zero. Used only as part of the long double-to-int
|
|
|
|
/// conversion sequence.
|
|
|
|
FADDRTZ,
|
|
|
|
|
|
|
|
/// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
|
|
|
|
MFFS,
|
|
|
|
|
|
|
|
/// TC_RETURN - A tail call return.
|
|
|
|
/// operand #0 chain
|
|
|
|
/// operand #1 callee (register or absolute)
|
|
|
|
/// operand #2 stack adjustment
|
|
|
|
/// operand #3 optional in flag
|
|
|
|
TC_RETURN,
|
|
|
|
|
|
|
|
/// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
|
|
|
|
CR6SET,
|
|
|
|
CR6UNSET,
|
|
|
|
|
|
|
|
/// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
|
|
|
|
/// for non-position independent code on PPC32.
|
|
|
|
PPC32_GOT,
|
|
|
|
|
|
|
|
/// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
|
|
|
|
/// local dynamic TLS and position indendepent code on PPC32.
|
|
|
|
PPC32_PICGOT,
|
|
|
|
|
|
|
|
/// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
|
|
|
|
/// TLS model, produces an ADDIS8 instruction that adds the GOT
|
|
|
|
/// base to sym\@got\@tprel\@ha.
|
|
|
|
ADDIS_GOT_TPREL_HA,
|
|
|
|
|
|
|
|
/// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
|
|
|
|
/// TLS model, produces a LD instruction with base register G8RReg
|
|
|
|
/// and offset sym\@got\@tprel\@l. This completes the addition that
|
|
|
|
/// finds the offset of "sym" relative to the thread pointer.
|
|
|
|
LD_GOT_TPREL_L,
|
|
|
|
|
|
|
|
/// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
|
|
|
|
/// model, produces an ADD instruction that adds the contents of
|
|
|
|
/// G8RReg to the thread pointer. Symbol contains a relocation
|
|
|
|
/// sym\@tls which is to be replaced by the thread pointer and
|
|
|
|
/// identifies to the linker that the instruction is part of a
|
|
|
|
/// TLS sequence.
|
|
|
|
ADD_TLS,
|
|
|
|
|
|
|
|
/// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
|
|
|
|
/// model, produces an ADDIS8 instruction that adds the GOT base
|
|
|
|
/// register to sym\@got\@tlsgd\@ha.
|
|
|
|
ADDIS_TLSGD_HA,
|
|
|
|
|
|
|
|
/// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
|
|
|
|
/// model, produces an ADDI8 instruction that adds G8RReg to
|
|
|
|
/// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
|
|
|
|
/// ADDIS_TLSGD_L_ADDR until after register assignment.
|
|
|
|
ADDI_TLSGD_L,
|
|
|
|
|
|
|
|
/// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
|
|
|
|
/// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
|
|
|
|
/// ADDIS_TLSGD_L_ADDR until after register assignment.
|
|
|
|
GET_TLS_ADDR,
|
|
|
|
|
|
|
|
/// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
|
|
|
|
/// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
|
|
|
|
/// register assignment.
|
|
|
|
ADDI_TLSGD_L_ADDR,
|
|
|
|
|
|
|
|
/// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
|
|
|
|
/// model, produces an ADDIS8 instruction that adds the GOT base
|
|
|
|
/// register to sym\@got\@tlsld\@ha.
|
|
|
|
ADDIS_TLSLD_HA,
|
|
|
|
|
|
|
|
/// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
|
|
|
|
/// model, produces an ADDI8 instruction that adds G8RReg to
|
|
|
|
/// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
|
|
|
|
/// ADDIS_TLSLD_L_ADDR until after register assignment.
|
|
|
|
ADDI_TLSLD_L,
|
|
|
|
|
|
|
|
/// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
|
|
|
|
/// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
|
|
|
|
/// ADDIS_TLSLD_L_ADDR until after register assignment.
|
|
|
|
GET_TLSLD_ADDR,
|
|
|
|
|
|
|
|
/// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
|
|
|
|
/// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
|
|
|
|
/// following register assignment.
|
|
|
|
ADDI_TLSLD_L_ADDR,
|
|
|
|
|
|
|
|
/// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
|
|
|
|
/// model, produces an ADDIS8 instruction that adds X3 to
|
|
|
|
/// sym\@dtprel\@ha.
|
|
|
|
ADDIS_DTPREL_HA,
|
|
|
|
|
|
|
|
/// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
|
|
|
|
/// model, produces an ADDI8 instruction that adds G8RReg to
|
|
|
|
/// sym\@got\@dtprel\@l.
|
|
|
|
ADDI_DTPREL_L,
|
|
|
|
|
|
|
|
/// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
|
|
|
|
/// during instruction selection to optimize a BUILD_VECTOR into
|
|
|
|
/// operations on splats. This is necessary to avoid losing these
|
|
|
|
/// optimizations due to constant folding.
|
|
|
|
VADD_SPLAT,
|
|
|
|
|
|
|
|
/// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
|
|
|
|
/// operand identifies the operating system entry point.
|
|
|
|
SC,
|
|
|
|
|
|
|
|
/// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
|
|
|
|
CLRBHRB,
|
|
|
|
|
|
|
|
/// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
|
|
|
|
/// history rolling buffer entry.
|
|
|
|
MFBHRBE,
|
|
|
|
|
|
|
|
/// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
|
|
|
|
RFEBB,
|
|
|
|
|
|
|
|
/// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
|
|
|
|
/// endian. Maps to an xxswapd instruction that corrects an lxvd2x
|
|
|
|
/// or stxvd2x instruction. The chain is necessary because the
|
|
|
|
/// sequence replaces a load and needs to provide the same number
|
|
|
|
/// of outputs.
|
|
|
|
XXSWAPD,
|
|
|
|
|
|
|
|
/// An SDNode for swaps that are not associated with any loads/stores
|
|
|
|
/// and thereby have no chain.
|
|
|
|
SWAP_NO_CHAIN,
|
|
|
|
|
|
|
|
/// An SDNode for Power9 vector absolute value difference.
|
|
|
|
/// operand #0 vector
|
|
|
|
/// operand #1 vector
|
|
|
|
/// operand #2 constant i32 0 or 1, to indicate whether needs to patch
|
|
|
|
/// the most significant bit for signed i32
|
|
|
|
///
|
|
|
|
/// Power9 VABSD* instructions are designed to support unsigned integer
|
|
|
|
/// vectors (byte/halfword/word), if we want to make use of them for signed
|
|
|
|
/// integer vectors, we have to flip their sign bits first. To flip sign bit
|
|
|
|
/// for byte/halfword integer vector would become inefficient, but for word
|
|
|
|
/// integer vector, we can leverage XVNEGSP to make it efficiently. eg:
|
|
|
|
/// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
|
|
|
|
/// => VABSDUW((XVNEGSP a), (XVNEGSP b))
|
|
|
|
VABSD,
|
|
|
|
|
|
|
|
/// QVFPERM = This corresponds to the QPX qvfperm instruction.
|
|
|
|
QVFPERM,
|
|
|
|
|
|
|
|
/// QVGPCI = This corresponds to the QPX qvgpci instruction.
|
|
|
|
QVGPCI,
|
|
|
|
|
|
|
|
/// QVALIGNI = This corresponds to the QPX qvaligni instruction.
|
|
|
|
QVALIGNI,
|
|
|
|
|
|
|
|
/// QVESPLATI = This corresponds to the QPX qvesplati instruction.
|
|
|
|
QVESPLATI,
|
|
|
|
|
|
|
|
/// QBFLT = Access the underlying QPX floating-point boolean
|
|
|
|
/// representation.
|
|
|
|
QBFLT,
|
|
|
|
|
|
|
|
/// FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or
|
|
|
|
/// lower (IDX=1) half of v4f32 to v2f64.
|
|
|
|
FP_EXTEND_HALF,
|
|
|
|
|
2020-04-10 00:17:23 +08:00
|
|
|
/// MAT_PCREL_ADDR = Materialize a PC Relative address. This can be done
|
|
|
|
/// either through an add like PADDI or through a PC Relative load like
|
|
|
|
/// PLD.
|
|
|
|
MAT_PCREL_ADDR,
|
|
|
|
|
2020-01-07 03:05:12 +08:00
|
|
|
/// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
|
|
|
|
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
|
|
|
|
/// the GPRC input, then stores it through Ptr. Type can be either i16 or
|
|
|
|
/// i32.
|
|
|
|
STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,
|
|
|
|
|
|
|
|
/// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
|
|
|
|
/// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
|
|
|
|
/// then puts it in the bottom bits of the GPRC. TYPE can be either i16
|
|
|
|
/// or i32.
|
|
|
|
LBRX,
|
|
|
|
|
|
|
|
/// STFIWX - The STFIWX instruction. The first operand is an input token
|
|
|
|
/// chain, then an f64 value to store, then an address to store it to.
|
|
|
|
STFIWX,
|
|
|
|
|
|
|
|
/// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
|
|
|
|
/// load which sign-extends from a 32-bit integer value into the
|
|
|
|
/// destination 64-bit register.
|
|
|
|
LFIWAX,
|
|
|
|
|
|
|
|
/// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
|
|
|
|
/// load which zero-extends from a 32-bit integer value into the
|
|
|
|
/// destination 64-bit register.
|
|
|
|
LFIWZX,
|
|
|
|
|
|
|
|
/// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
|
|
|
|
/// integer smaller than 64 bits into a VSR. The integer is zero-extended.
|
|
|
|
/// This can be used for converting loaded integers to floating point.
|
|
|
|
LXSIZX,
|
|
|
|
|
|
|
|
/// STXSIX - The STXSI[bh]X instruction. The first operand is an input
|
|
|
|
/// chain, then an f64 value to store, then an address to store it to,
|
|
|
|
/// followed by a byte-width for the store.
|
|
|
|
STXSIX,
|
|
|
|
|
|
|
|
/// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
|
|
|
|
/// Maps directly to an lxvd2x instruction that will be followed by
|
|
|
|
/// an xxswapd.
|
|
|
|
LXVD2X,
|
|
|
|
|
|
|
|
/// VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
|
|
|
|
/// Maps directly to one of lxvd2x/lxvw4x/lxvh8x/lxvb16x depending on
|
|
|
|
/// the vector type to load vector in big-endian element order.
|
|
|
|
LOAD_VEC_BE,
|
|
|
|
|
|
|
|
/// VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a
|
|
|
|
/// v2f32 value into the lower half of a VSR register.
|
|
|
|
LD_VSX_LH,
|
|
|
|
|
|
|
|
/// VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory
|
|
|
|
/// instructions such as LXVDSX, LXVWSX.
|
|
|
|
LD_SPLAT,
|
|
|
|
|
|
|
|
/// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
|
|
|
|
/// Maps directly to an stxvd2x instruction that will be preceded by
|
|
|
|
/// an xxswapd.
|
|
|
|
STXVD2X,
|
|
|
|
|
|
|
|
/// CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
|
|
|
|
/// Maps directly to one of stxvd2x/stxvw4x/stxvh8x/stxvb16x depending on
|
|
|
|
/// the vector type to store vector in big-endian element order.
|
|
|
|
STORE_VEC_BE,
|
|
|
|
|
|
|
|
/// Store scalar integers from VSR.
|
|
|
|
ST_VSR_SCAL_INT,
|
|
|
|
|
|
|
|
/// QBRC, CHAIN = QVLFSb CHAIN, Ptr
|
|
|
|
/// The 4xf32 load used for v4i1 constants.
|
|
|
|
QVLFSb,
|
|
|
|
|
|
|
|
/// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
|
|
|
|
/// except they ensure that the compare input is zero-extended for
|
|
|
|
/// sub-word versions because the atomic loads zero-extend.
|
|
|
|
ATOMIC_CMP_SWAP_8,
|
|
|
|
ATOMIC_CMP_SWAP_16,
|
|
|
|
|
|
|
|
/// GPRC = TOC_ENTRY GA, TOC
|
|
|
|
/// Loads the entry for GA from the TOC, where the TOC base is given by
|
|
|
|
/// the last operand.
|
|
|
|
TOC_ENTRY
|
|
|
|
};
|
2017-01-13 08:58:58 +08:00
|
|
|
|
|
|
|
} // end namespace PPCISD
|
2006-03-20 14:15:45 +08:00
|
|
|
|
|
|
|
/// Define some predicates that are used for node matching.
|
|
|
|
namespace PPC {
|
2017-01-13 08:58:58 +08:00
|
|
|
|
2006-04-07 01:23:16 +08:00
|
|
|
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
|
|
|
|
/// VPKUHUM instruction.
|
2014-08-04 21:53:40 +08:00
|
|
|
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
|
2014-06-10 22:35:01 +08:00
|
|
|
SelectionDAG &DAG);
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2006-04-07 01:23:16 +08:00
|
|
|
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
|
|
|
|
/// VPKUWUM instruction.
|
2014-08-04 21:53:40 +08:00
|
|
|
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
|
2014-06-10 22:35:01 +08:00
|
|
|
SelectionDAG &DAG);
|
2006-04-07 05:11:54 +08:00
|
|
|
|
2015-05-16 09:02:12 +08:00
|
|
|
/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
|
|
|
|
/// VPKUDUM instruction.
|
|
|
|
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
|
|
|
|
SelectionDAG &DAG);
|
|
|
|
|
2006-04-07 05:11:54 +08:00
|
|
|
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
|
|
|
|
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
|
2009-04-28 02:41:29 +08:00
|
|
|
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
|
2014-07-25 09:55:55 +08:00
|
|
|
unsigned ShuffleKind, SelectionDAG &DAG);
|
2006-04-07 05:11:54 +08:00
|
|
|
|
|
|
|
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
|
|
|
|
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
|
2009-04-28 02:41:29 +08:00
|
|
|
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
|
2014-07-25 09:55:55 +08:00
|
|
|
unsigned ShuffleKind, SelectionDAG &DAG);
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2015-06-25 23:17:40 +08:00
|
|
|
/// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
|
|
|
|
/// a VMRGEW or VMRGOW instruction
|
|
|
|
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
|
|
|
|
unsigned ShuffleKind, SelectionDAG &DAG);
|
2017-05-25 07:48:29 +08:00
|
|
|
/// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
|
|
|
|
/// for a XXSLDWI instruction.
|
|
|
|
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
|
|
|
|
bool &Swap, bool IsLE);
|
2017-06-13 02:24:36 +08:00
|
|
|
|
|
|
|
/// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
|
|
|
|
/// for a XXBRH instruction.
|
|
|
|
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);
|
|
|
|
|
|
|
|
/// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
|
|
|
|
/// for a XXBRW instruction.
|
|
|
|
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);
|
|
|
|
|
|
|
|
/// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
|
|
|
|
/// for a XXBRD instruction.
|
|
|
|
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);
|
|
|
|
|
|
|
|
/// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
|
|
|
|
/// for a XXBRQ instruction.
|
|
|
|
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);
|
|
|
|
|
2017-05-31 21:09:57 +08:00
|
|
|
/// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
|
|
|
|
/// for a XXPERMDI instruction.
|
|
|
|
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
|
|
|
|
bool &Swap, bool IsLE);
|
2017-05-25 07:48:29 +08:00
|
|
|
|
2014-08-06 04:47:25 +08:00
|
|
|
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
|
|
|
|
/// shift amount, otherwise return -1.
|
|
|
|
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
|
|
|
|
SelectionDAG &DAG);
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2006-03-20 14:15:45 +08:00
|
|
|
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a splat of a single element that is suitable for input to
|
|
|
|
/// VSPLTB/VSPLTH/VSPLTW.
|
2009-04-28 02:41:29 +08:00
|
|
|
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2016-07-13 05:00:10 +08:00
|
|
|
/// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
|
|
|
|
/// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
|
|
|
|
/// shuffle of v4f32/v4i32 vectors that just inserts one element from one
|
|
|
|
/// vector into the other. This function will also set a couple of
|
|
|
|
/// output parameters for how much the source vector needs to be shifted and
|
|
|
|
/// what byte number needs to be specified for the instruction to put the
|
|
|
|
/// element in the desired location of the target vector.
|
|
|
|
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
|
|
|
|
unsigned &InsertAtByte, bool &Swap, bool IsLE);
|
|
|
|
|
2019-09-18 00:45:20 +08:00
|
|
|
/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
|
|
|
|
/// appropriate for PPC mnemonics (which have a big endian bias - namely
|
|
|
|
/// elements are counted from the left of the vector register).
|
|
|
|
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
|
|
|
|
SelectionDAG &DAG);
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2006-04-13 01:37:20 +08:00
|
|
|
/// get_VSPLTI_elt - If this is a build_vector of constants which can be
|
2006-04-08 14:46:53 +08:00
|
|
|
/// formed by using a vspltis[bhw] instruction of the specified element
|
|
|
|
/// size, return the constant being splatted. The ByteSize field indicates
|
|
|
|
/// the number of bytes of each element [124] -> [bhw].
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
|
[PowerPC] Add support for the QPX vector instruction set
This adds support for the QPX vector instruction set, which is used by the
enhanced A2 cores on the IBM BG/Q supercomputers. QPX vectors are 256 bytes
wide, holding 4 double-precision floating-point values. Boolean values, modeled
here as <4 x i1> are actually also represented as floating-point values
(essentially { -1, 1 } for { false, true }). QPX shares many features with
Altivec and VSX, but is distinct from both of them. One major difference is
that, instead of adding completely-separate vector registers, QPX vector
registers are extensions of the scalar floating-point registers (lane 0 is the
corresponding scalar floating-point value). The operations supported on QPX
vectors mirrors that supported on the scalar floating-point values (with some
additional ones for permutations and logical/comparison operations).
I've been maintaining this support out-of-tree, as part of the bgclang project,
for several years. This is not the entire bgclang patch set, but is most of the
subset that can be cleanly integrated into LLVM proper at this time. Adding
this to the LLVM backend is part of my efforts to rebase bgclang to the current
LLVM trunk, but is independently useful (especially for codes that use LLVM as
a JIT in library form).
The assembler/disassembler test coverage is complete. The CodeGen test coverage
is not, but I've included some tests, and more will be added as follow-up work.
llvm-svn: 230413
2015-02-25 09:06:45 +08:00
|
|
|
|
|
|
|
/// If this is a qvaligni shuffle mask, return the shift
|
|
|
|
/// amount, otherwise return -1.
|
|
|
|
int isQVALIGNIShuffleMask(SDNode *N);
|
2017-01-13 08:58:58 +08:00
|
|
|
|
|
|
|
} // end namespace PPC
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2005-10-16 13:39:50 +08:00
|
|
|
class PPCTargetLowering : public TargetLowering {
|
2014-06-13 06:38:18 +08:00
|
|
|
const PPCSubtarget &Subtarget;
|
2010-04-17 22:41:14 +08:00
|
|
|
|
2005-08-17 01:14:42 +08:00
|
|
|
public:
|
2015-01-31 06:02:31 +08:00
|
|
|
explicit PPCTargetLowering(const PPCTargetMachine &TM,
|
|
|
|
const PPCSubtarget &STI);
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2006-01-10 07:52:17 +08:00
|
|
|
/// getTargetNodeName() - This method returns the name of a target specific
|
|
|
|
/// DAG node.
|
2014-04-29 15:57:37 +08:00
|
|
|
const char *getTargetNodeName(unsigned Opcode) const override;
|
2006-11-08 10:15:41 +08:00
|
|
|
|
2018-12-20 14:19:59 +08:00
|
|
|
bool isSelectSupported(SelectSupportKind Kind) const override {
|
|
|
|
// PowerPC does not support scalar condition selects on vectors.
|
|
|
|
return (Kind != SelectSupportKind::ScalarCondVectorVal);
|
|
|
|
}
|
|
|
|
|
2016-07-05 17:22:29 +08:00
|
|
|
/// getPreferredVectorAction - The code we generate when vector types are
|
|
|
|
/// legalized by promoting the integer element type is often much worse
|
|
|
|
/// than code we generate if we widen the type for applicable vector types.
|
|
|
|
/// The issue with promoting is that the vector is scalaraized, individual
|
|
|
|
/// elements promoted and then the vector is rebuilt. So say we load a pair
|
|
|
|
/// of v4i8's and shuffle them. This will turn into a mess of 8 extending
|
|
|
|
/// loads, moves back into VSR's (or memory ops if we don't have moves) and
|
|
|
|
/// then the VPERM for the shuffle. All in all a very slow sequence.
|
2018-11-06 07:26:13 +08:00
|
|
|
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
|
2016-07-05 17:22:29 +08:00
|
|
|
const override {
|
2020-05-22 20:48:09 +08:00
|
|
|
if (VT.getVectorNumElements() != 1 && VT.getScalarSizeInBits() % 8 == 0)
|
2016-07-05 17:22:29 +08:00
|
|
|
return TypeWidenVector;
|
|
|
|
return TargetLoweringBase::getPreferredVectorAction(VT);
|
|
|
|
}
|
2017-01-13 08:58:58 +08:00
|
|
|
|
2015-12-15 01:57:33 +08:00
|
|
|
bool useSoftFloat() const override;
|
|
|
|
|
2018-07-18 12:25:10 +08:00
|
|
|
bool hasSPE() const;
|
|
|
|
|
2015-07-09 23:12:23 +08:00
|
|
|
/// Shift amounts are always represented as i32 on PPC, regardless of the
/// type being shifted.
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
  return MVT::i32;
}
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2015-01-05 13:24:42 +08:00
|
|
|
/// Count-trailing-zeros is cheap enough on PPC to be speculated
/// unconditionally (no branch needed to guard it).
bool isCheapToSpeculateCttz() const override {
  return true;
}
|
|
|
|
|
|
|
|
/// Count-leading-zeros is cheap enough on PPC to be speculated
/// unconditionally.
bool isCheapToSpeculateCtlz() const override {
  return true;
}
|
|
|
|
|
2016-08-16 21:53:53 +08:00
|
|
|
/// Tell the generic combiner that CTLZ is fast on this target, so
/// transforms that introduce it are profitable.
bool isCtlzFast() const override {
  return true;
}
|
|
|
|
|
[CGP] Make ICMP_EQ use CR result of ICMP_S(L|G)T dominators
For example:
long long test(long long a, long long b) {
if (a << b > 0)
return b;
if (a << b < 0)
return a;
return a*b;
}
Produces:
sld. 5, 3, 4
ble 0, .LBB0_2
mr 3, 4
blr
.LBB0_2: # %if.end
cmpldi 5, 0
li 5, 1
isel 4, 4, 5, 2
mulld 3, 4, 3
blr
But the compare (cmpldi 5, 0) is redundant and can be removed (CR0 already
contains the result of that comparison).
The root cause of this is that LLVM converts signed comparisons into equality
comparison based on dominance. Equality comparisons are unsigned by default, so
we get either a record-form or cmp (without the l for logical) feeding a cmpl.
That is the situation we want to avoid here.
Differential Revision: https://reviews.llvm.org/D60506
2019-11-12 00:15:52 +08:00
|
|
|
/// Returning false keeps CodeGenPrepare from rewriting a signed compare
/// into an (unsigned-by-default) equality compare based on dominance.
/// On PPC the condition register already holds the signed-compare result
/// (e.g. from a record-form instruction), so introducing a separate
/// logical compare (cmpl) would add a redundant instruction.
bool isEqualityCmpFoldedWithSignedCmp() const override {
  return false;
}
|
|
|
|
|
2016-09-02 10:58:25 +08:00
|
|
|
/// Report that (and x, (not y))-style compare patterns are cheap here —
/// PPC provides and-with-complement style instructions (e.g. andc), so
/// the combiner should prefer that form.
bool hasAndNotCompare(SDValue) const override {
  return true;
}
|
|
|
|
|
[Codegen][X86][AArch64][ARM][PowerPC] Inc-of-add vs sub-of-not (PR42457)
Summary:
This is the backend part of [[ https://bugs.llvm.org/show_bug.cgi?id=42457 | PR42457 ]].
In middle-end, we'd want to prefer the form with two adds - D63992,
but as this diff shows, not every target will prefer that pattern.
Out of 4 targets for which i added tests all seem to be ok with inc-of-add for scalars,
but only X86 prefer that same pattern for vectors.
Here i'm adding a new TLI hook, always defaulting to the inc-of-add,
but adding AArch64,ARM,PowerPC overrides to prefer inc-of-add only for scalars.
Reviewers: spatel, RKSimon, efriedma, t.p.northover, hfinkel
Reviewed By: efriedma
Subscribers: nemanjai, javed.absar, kristof.beyls, kbarton, jsji, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64090
llvm-svn: 365010
2019-07-03 17:41:35 +08:00
|
|
|
bool preferIncOfAddToSubOfNot(EVT VT) const override;
|
|
|
|
|
2017-04-05 22:09:39 +08:00
|
|
|
/// Allow folding logic of two setcc nodes into bitwise ops, but only for
/// scalar integer result types (not vectors or floats).
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
  return VT.isScalarInteger();
}
|
|
|
|
|
2020-06-04 16:30:17 +08:00
|
|
|
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
|
|
|
|
bool OptForSize, NegatibleCost &Cost,
|
|
|
|
unsigned Depth = 0) const override;
|
|
|
|
|
2008-03-10 23:42:14 +08:00
|
|
|
/// getSetCCResultType - Return the ISD::SETCC ValueType
|
2015-07-09 10:09:04 +08:00
|
|
|
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
|
|
|
|
EVT VT) const override;
|
2008-03-10 23:42:14 +08:00
|
|
|
|
Optionally enable more-aggressive FMA formation in DAGCombine
The heuristic used by DAGCombine to form FMAs checks that the FMUL has only one
use, but this is overly-conservative on some systems. Specifically, if the FMA
and the FADD have the same latency (and the FMA does not compete for resources
with the FMUL any more than the FADD does), there is no need for the
restriction, and furthermore, forming the FMA leaving the FMUL can still allow
for higher overall throughput and decreased critical-path length.
Here we add a new TLI callback, enableAggressiveFMAFusion, false by default, to
elide the hasOneUse check. This is enabled for PowerPC by default, as most
PowerPC systems will benefit.
Patch by Olivier Sallenave, thanks!
llvm-svn: 218120
2014-09-19 19:42:56 +08:00
|
|
|
/// Return true if target always beneficiates from combining into FMA for a
|
|
|
|
/// given value type. This must typically return false on targets where FMA
|
|
|
|
/// takes more cycles to execute than FADD.
|
|
|
|
bool enableAggressiveFMAFusion(EVT VT) const override;
|
|
|
|
|
2006-11-08 10:15:41 +08:00
|
|
|
/// getPreIndexedAddressParts - returns true by value, base pointer and
|
|
|
|
/// offset pointer and addressing mode by reference if the node's address
|
|
|
|
/// can be legally represented as pre-indexed load / store address.
|
2014-04-29 15:57:37 +08:00
|
|
|
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
|
|
|
|
SDValue &Offset,
|
|
|
|
ISD::MemIndexedMode &AM,
|
|
|
|
SelectionDAG &DAG) const override;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
PowerPC/SPE: Fix load/store handling for SPE
Summary:
Pointed out in a comment for D49754, register spilling will currently
spill SPE registers at almost any offset. However, the instructions
`evstdd` and `evldd` require a) 8-byte alignment, and b) a limit of 256
(unsigned) bytes from the base register, as the offset must fix into a
5-bit offset, which ranges from 0-31 (indexed in double-words).
The update to the register spill test is taken partially from the test
case shown in D49754.
Additionally, pointed out by Kei Thomsen, globals will currently use
evldd/evstdd, though the offset isn't known at compile time, so may
exceed the 8-bit (unsigned) offset permitted. This fixes that as well,
by forcing it to always use evlddx/evstddx when accessing globals.
Part of the patch contributed by Kei Thomsen.
Reviewers: nemanjai, hfinkel, joerg
Subscribers: kbarton, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D54409
llvm-svn: 366318
2019-07-17 20:30:04 +08:00
|
|
|
/// SelectAddressEVXRegReg - Given the specified addressed, check to see if
|
|
|
|
/// it can be more efficiently represented as [r+imm].
|
|
|
|
bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index,
|
|
|
|
SelectionDAG &DAG) const;
|
|
|
|
|
2006-11-08 10:15:41 +08:00
|
|
|
/// SelectAddressRegReg - Given the specified addressed, check to see if it
|
2019-05-22 10:57:31 +08:00
|
|
|
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment
|
|
|
|
/// is non-zero, only accept displacement which is not suitable for [r+imm].
|
|
|
|
/// Returns false if it can be represented by [r+imm], which are preferred.
|
2008-07-28 05:46:04 +08:00
|
|
|
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
|
2019-05-22 10:57:31 +08:00
|
|
|
SelectionDAG &DAG,
|
|
|
|
unsigned EncodingAlignment = 0) const;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2006-11-08 10:15:41 +08:00
|
|
|
/// SelectAddressRegImm - Returns true if the address N can be represented
|
|
|
|
/// by a base register plus a signed 16-bit displacement [r+imm], and if it
|
2019-05-22 10:57:31 +08:00
|
|
|
/// is not better represented as reg+reg. If \p EncodingAlignment is
|
|
|
|
/// non-zero, only accept displacements suitable for instruction encoding
|
|
|
|
/// requirement, i.e. multiples of 4 for DS form.
|
2008-07-28 05:46:04 +08:00
|
|
|
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
|
2019-05-22 10:57:31 +08:00
|
|
|
SelectionDAG &DAG,
|
|
|
|
unsigned EncodingAlignment) const;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2006-11-08 10:15:41 +08:00
|
|
|
/// SelectAddressRegRegOnly - Given the specified addressed, force it to be
|
|
|
|
/// represented as an indexed [r+r] operation.
|
2008-07-28 05:46:04 +08:00
|
|
|
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
|
2009-01-16 00:29:45 +08:00
|
|
|
SelectionDAG &DAG) const;
|
2006-11-08 10:15:41 +08:00
|
|
|
|
2020-04-10 00:17:23 +08:00
|
|
|
/// SelectAddressPCRel - Represent the specified address as pc relative to
|
|
|
|
/// be represented as [pc+imm]
|
|
|
|
bool SelectAddressPCRel(SDValue N, SDValue &Base) const;
|
|
|
|
|
2014-04-29 15:57:37 +08:00
|
|
|
Sched::Preference getSchedulingPreference(SDNode *N) const override;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2005-08-26 08:52:45 +08:00
|
|
|
/// LowerOperation - Provide custom lowering hooks for some operations.
|
|
|
|
///
|
2014-04-29 15:57:37 +08:00
|
|
|
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
|
2007-11-29 02:44:47 +08:00
|
|
|
|
2008-12-01 19:39:25 +08:00
|
|
|
/// ReplaceNodeResults - Replace the results of node with an illegal result
|
|
|
|
/// type with new values built out of custom code.
|
|
|
|
///
|
2014-04-29 15:57:37 +08:00
|
|
|
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
|
|
|
|
SelectionDAG &DAG) const override;
|
2008-12-01 19:39:25 +08:00
|
|
|
|
[PowerPC 1/4] Little-endian adjustments for VSX loads/stores
This patch addresses the inherent big-endian bias in the lxvd2x,
lxvw4x, stxvd2x, and stxvw4x instructions. These instructions load
vector elements into registers left-to-right (with the first element
loaded into the high-order bits of the register), regardless of the
endian setting of the processor. However, these are the only
vector memory instructions that permit unaligned storage accesses, so
we want to use them for little-endian.
To make this work, a lxvd2x or lxvw4x is replaced with an lxvd2x
followed by an xxswapd, which swaps the doublewords. This works for
lxvw4x as well as lxvd2x, because for lxvw4x on an LE system the
vector elements are in LE order (right-to-left) within each
doubleword. (Thus after lxvw2x of a <4 x float> the elements will
appear as 1, 0, 3, 2. Following the swap, they will appear as 3, 2,
0, 1, as desired.) For stores, an stxvd2x or stxvw4x is replaced
with an stxvd2x preceded by an xxswapd.
Introduction of extra swap instructions provides correctness, but
obviously is not ideal from a performance perspective. Future patches
will address this with optimizations to remove most of the introduced
swaps, which have proven effective in other implementations.
The introduction of the swaps is performed during lowering of LOAD,
STORE, INTRINSIC_W_CHAIN, and INTRINSIC_VOID operations. The latter
are used to translate intrinsics that specify the VSX loads and stores
directly into equivalent sequences for little endian. Thus code that
uses vec_vsx_ld and vec_vsx_st does not have to be modified to be
ported from BE to LE.
We introduce new PPCISD opcodes for LXVD2X, STXVD2X, and XXSWAPD for
use during this lowering step. In PPCInstrVSX.td, we add new SDType
and SDNode definitions for these (PPClxvd2x, PPCstxvd2x, PPCxxswapd).
These are recognized during instruction selection and mapped to the
correct instructions.
Several tests that were written to use -mcpu=pwr7 or pwr8 are modified
to disable VSX on LE variants because code generation changes with
this and subsequent patches in this set. I chose to include all of
these in the first patch than try to rigorously sort out which tests
were broken by one or another of the patches. Sorry about that.
The new test vsx-ldst-builtin-le.ll, and the changes to vsx-ldst.ll,
are disabled until LE support is enabled because of breakages that
occur as noted in those tests. They are re-enabled in patch 4/4.
llvm-svn: 223783
2014-12-10 00:35:51 +08:00
|
|
|
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
|
2014-04-29 15:57:37 +08:00
|
|
|
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2014-12-12 02:37:52 +08:00
|
|
|
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
|
2018-07-31 07:22:00 +08:00
|
|
|
SmallVectorImpl<SDNode *> &Created) const override;
|
2014-12-12 02:37:52 +08:00
|
|
|
|
2019-12-28 22:18:56 +08:00
|
|
|
Register getRegisterByName(const char* RegName, LLT VT,
|
2019-10-01 09:44:39 +08:00
|
|
|
const MachineFunction &MF) const override;
|
2014-05-12 03:29:11 +08:00
|
|
|
|
2014-05-15 05:14:37 +08:00
|
|
|
void computeKnownBitsForTargetNode(const SDValue Op,
|
2017-04-28 13:31:46 +08:00
|
|
|
KnownBits &Known,
|
2017-03-31 19:24:16 +08:00
|
|
|
const APInt &DemandedElts,
|
2014-05-15 05:14:37 +08:00
|
|
|
const SelectionDAG &DAG,
|
|
|
|
unsigned Depth = 0) const override;
|
2005-10-19 07:23:37 +08:00
|
|
|
|
2019-09-27 20:54:21 +08:00
|
|
|
Align getPrefLoopAlignment(MachineLoop *ML) const override;
|
2015-01-04 01:58:24 +08:00
|
|
|
|
2016-03-17 06:12:04 +08:00
|
|
|
/// PPC's atomic lowering relies on explicit fences around atomic
/// instructions; the actual fence instructions are produced by
/// emitLeadingFence/emitTrailingFence below.
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
  return true;
}
|
|
|
|
|
2017-05-09 23:27:17 +08:00
|
|
|
Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
|
|
|
|
AtomicOrdering Ord) const override;
|
|
|
|
Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
|
|
|
|
AtomicOrdering Ord) const override;
|
2014-09-24 04:46:49 +08:00
|
|
|
|
2014-04-29 15:57:37 +08:00
|
|
|
MachineBasicBlock *
|
2016-07-01 06:52:52 +08:00
|
|
|
EmitInstrWithCustomInserter(MachineInstr &MI,
|
|
|
|
MachineBasicBlock *MBB) const override;
|
|
|
|
MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
|
2015-03-11 04:51:07 +08:00
|
|
|
MachineBasicBlock *MBB,
|
|
|
|
unsigned AtomicSize,
|
2016-08-29 00:17:58 +08:00
|
|
|
unsigned BinOpcode,
|
|
|
|
unsigned CmpOpcode = 0,
|
|
|
|
unsigned CmpPred = 0) const;
|
2016-07-01 06:52:52 +08:00
|
|
|
MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
|
2011-02-26 05:41:48 +08:00
|
|
|
MachineBasicBlock *MBB,
|
2016-07-01 06:52:52 +08:00
|
|
|
bool is8bit,
|
2016-08-29 00:17:58 +08:00
|
|
|
unsigned Opcode,
|
|
|
|
unsigned CmpOpcode = 0,
|
|
|
|
unsigned CmpPred = 0) const;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2016-07-01 06:52:52 +08:00
|
|
|
MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
|
2013-03-22 05:37:52 +08:00
|
|
|
MachineBasicBlock *MBB) const;
|
|
|
|
|
2016-07-01 06:52:52 +08:00
|
|
|
MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
|
2013-03-22 05:37:52 +08:00
|
|
|
MachineBasicBlock *MBB) const;
|
|
|
|
|
2015-07-06 03:29:18 +08:00
|
|
|
ConstraintType getConstraintType(StringRef Constraint) const override;
|
2010-10-30 01:29:13 +08:00
|
|
|
|
|
|
|
/// Examine constraint string and operand type and determine a weight value.
|
|
|
|
/// The operand object must already have been set up with the operand type.
|
|
|
|
ConstraintWeight getSingleConstraintMatchWeight(
|
2014-04-29 15:57:37 +08:00
|
|
|
AsmOperandInfo &info, const char *constraint) const override;
|
2010-10-30 01:29:13 +08:00
|
|
|
|
2015-02-27 06:38:43 +08:00
|
|
|
std::pair<unsigned, const TargetRegisterClass *>
|
|
|
|
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
|
2015-07-06 03:29:18 +08:00
|
|
|
StringRef Constraint, MVT VT) const override;
|
2006-03-14 07:20:37 +08:00
|
|
|
|
2008-02-29 06:31:51 +08:00
|
|
|
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
|
|
|
|
/// function arguments in the caller parameter area. This is the actual
|
|
|
|
/// alignment, not its logarithm.
|
2015-07-09 10:09:28 +08:00
|
|
|
unsigned getByValTypeAlignment(Type *Ty,
|
|
|
|
const DataLayout &DL) const override;
|
2008-02-29 06:31:51 +08:00
|
|
|
|
2007-08-25 08:47:38 +08:00
|
|
|
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
|
2010-06-26 05:55:36 +08:00
|
|
|
/// vector. If it is invalid, don't add anything to Ops.
|
2014-04-29 15:57:37 +08:00
|
|
|
void LowerAsmOperandForConstraint(SDValue Op,
|
|
|
|
std::string &Constraint,
|
|
|
|
std::vector<SDValue> &Ops,
|
|
|
|
SelectionDAG &DAG) const override;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2015-07-06 03:29:18 +08:00
|
|
|
unsigned
|
|
|
|
getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
|
2015-03-17 19:09:13 +08:00
|
|
|
if (ConstraintCode == "es")
|
|
|
|
return InlineAsm::Constraint_es;
|
|
|
|
else if (ConstraintCode == "o")
|
|
|
|
return InlineAsm::Constraint_o;
|
|
|
|
else if (ConstraintCode == "Q")
|
|
|
|
return InlineAsm::Constraint_Q;
|
|
|
|
else if (ConstraintCode == "Z")
|
|
|
|
return InlineAsm::Constraint_Z;
|
|
|
|
else if (ConstraintCode == "Zy")
|
|
|
|
return InlineAsm::Constraint_Zy;
|
|
|
|
return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
|
2015-03-16 21:13:41 +08:00
|
|
|
}
|
|
|
|
|
2007-03-31 07:15:24 +08:00
|
|
|
/// isLegalAddressingMode - Return true if the addressing mode represented
|
|
|
|
/// by AM is legal for this target, for a load/store of the specified type.
|
2015-07-09 10:09:40 +08:00
|
|
|
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
|
2017-07-21 19:59:37 +08:00
|
|
|
Type *Ty, unsigned AS,
|
|
|
|
Instruction *I = nullptr) const override;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2014-04-13 05:52:38 +08:00
|
|
|
/// isLegalICmpImmediate - Return true if the specified immediate is legal
|
|
|
|
/// icmp immediate, that is the target has icmp instructions which can
|
|
|
|
/// compare a register against the immediate without having to materialize
|
|
|
|
/// the immediate into a register.
|
|
|
|
bool isLegalICmpImmediate(int64_t Imm) const override;
|
|
|
|
|
|
|
|
/// isLegalAddImmediate - Return true if the specified immediate is legal
|
|
|
|
/// add immediate, that is the target has add instructions which can
|
|
|
|
/// add a register and the immediate without having to materialize
|
|
|
|
/// the immediate into a register.
|
|
|
|
bool isLegalAddImmediate(int64_t Imm) const override;
|
|
|
|
|
|
|
|
/// isTruncateFree - Return true if it's free to truncate a value of
|
|
|
|
/// type Ty1 to type Ty2. e.g. On PPC it's free to truncate a i64 value in
|
|
|
|
/// register X1 to i32 by referencing its sub-register R1.
|
|
|
|
bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
|
|
|
|
bool isTruncateFree(EVT VT1, EVT VT2) const override;
|
|
|
|
|
2015-01-10 16:21:59 +08:00
|
|
|
bool isZExtFree(SDValue Val, EVT VT2) const override;
|
|
|
|
|
2017-10-14 03:55:45 +08:00
|
|
|
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;
|
2015-01-13 23:06:36 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Returns true if it is beneficial to convert a load of a constant
|
2014-04-13 05:52:38 +08:00
|
|
|
/// to just the constant itself.
|
|
|
|
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
|
|
|
|
Type *Ty) const override;
|
|
|
|
|
2017-08-25 07:24:43 +08:00
|
|
|
/// Prefer rewriting select-of-constants into math/logic sequences:
/// PPC has fast, complete bit-twiddling instructions but comparatively
/// poor conditional-execution performance, so the math form wins.
bool convertSelectOfConstantsToMath(EVT VT) const override {
  return true;
}
|
|
|
|
|
2019-07-23 11:34:40 +08:00
|
|
|
bool isDesirableToTransformToIntegerOp(unsigned Opc,
|
|
|
|
EVT VT) const override {
|
|
|
|
// Only handle float load/store pair because float(fpr) load/store
|
|
|
|
// instruction has more cycles than integer(gpr) load/store in PPC.
|
|
|
|
if (Opc != ISD::LOAD && Opc != ISD::STORE)
|
|
|
|
return false;
|
|
|
|
if (VT != MVT::f32 && VT != MVT::f64)
|
|
|
|
return false;
|
|
|
|
|
2020-04-08 22:29:30 +08:00
|
|
|
return true;
|
2019-07-23 11:34:40 +08:00
|
|
|
}
|
|
|
|
|
2018-12-03 11:32:16 +08:00
|
|
|
// Returns true if the address of the global is stored in TOC entry.
|
|
|
|
bool isAccessedAsGotIndirect(SDValue N) const;
|
|
|
|
|
2014-04-29 15:57:37 +08:00
|
|
|
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
|
2011-02-26 05:41:48 +08:00
|
|
|
|
2014-08-13 09:15:40 +08:00
|
|
|
bool getTgtMemIntrinsic(IntrinsicInfo &Info,
|
|
|
|
const CallInst &I,
|
2017-12-15 06:34:10 +08:00
|
|
|
MachineFunction &MF,
|
2014-08-13 09:15:40 +08:00
|
|
|
unsigned Intrinsic) const override;
|
|
|
|
|
2010-04-17 04:11:05 +08:00
|
|
|
/// It returns EVT::Other if the type should be determined using generic
|
|
|
|
/// target-independent logic.
|
[NFC] Introduce a type to model memory operation
Summary: This is a first step before changing the types to llvm::Align and introduce functions to ease client code.
Reviewers: courbet
Subscribers: arsenm, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, jrtc27, atanasyan, jsji, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D73785
2020-01-31 22:40:31 +08:00
|
|
|
EVT getOptimalMemOpType(const MemOp &Op,
|
|
|
|
const AttributeList &FuncAttributes) const override;
|
2008-10-21 11:41:46 +08:00
|
|
|
|
2013-03-15 23:27:13 +08:00
|
|
|
/// Is unaligned memory access allowed for the given type, and is it fast
|
|
|
|
/// relative to software emulation.
|
2019-06-13 01:14:03 +08:00
|
|
|
bool allowsMisalignedMemoryAccesses(
|
|
|
|
EVT VT, unsigned AddrSpace, unsigned Align = 1,
|
|
|
|
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
|
|
|
|
bool *Fast = nullptr) const override;
|
2013-03-15 23:27:13 +08:00
|
|
|
|
AArch64/PowerPC/SystemZ/X86: This patch fixes the interface, usage, and all
in-tree implementations of TargetLoweringBase::isFMAFasterThanMulAndAdd in
order to resolve the following issues with fmuladd (i.e. optional FMA)
intrinsics:
1. On X86(-64) targets, ISD::FMA nodes are formed when lowering fmuladd
intrinsics even if the subtarget does not support FMA instructions, leading
to laughably bad code generation in some situations.
2. On AArch64 targets, ISD::FMA nodes are formed for operations on fp128,
resulting in a call to a software fp128 FMA implementation.
3. On PowerPC targets, FMAs are not generated from fmuladd intrinsics on types
like v2f32, v8f32, v4f64, etc., even though they promote, split, scalarize,
etc. to types that support hardware FMAs.
The function has also been slightly renamed for consistency and to force a
merge/build conflict for any out-of-tree target implementing it. To resolve,
see comments and fixed in-tree examples.
llvm-svn: 185956
2013-07-10 02:16:56 +08:00
|
|
|
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
|
|
|
|
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
|
|
|
|
/// expanded to FMAs when this method returns true, otherwise fmuladd is
|
|
|
|
/// expanded to fmul + fadd.
|
2019-10-29 08:38:44 +08:00
|
|
|
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
|
|
|
|
EVT VT) const override;
|
2012-06-22 08:49:52 +08:00
|
|
|
|
2020-03-19 11:24:40 +08:00
|
|
|
bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;
|
|
|
|
|
2020-03-19 12:04:00 +08:00
|
|
|
/// isProfitableToHoist - Check if it is profitable to hoist instruction
|
|
|
|
/// \p I to its dominator block.
|
|
|
|
/// For example, it is not profitable if \p I and it's only user can form a
|
|
|
|
/// FMA instruction, because Powerpc prefers FMADD.
|
|
|
|
bool isProfitableToHoist(Instruction *I) const override;
|
|
|
|
|
Revert "r225811 - Revert "r225808 - [PowerPC] Add StackMap/PatchPoint support""
This re-applies r225808, fixed to avoid problems with SDAG dependencies along
with the preceding fix to ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs.
These problems caused the original regression tests to assert/segfault on many
(but not all) systems.
Original commit message:
This commit does two things:
1. Refactors PPCFastISel to use more of the common infrastructure for call
lowering (this lets us take advantage of this common code for lowering some
common intrinsics, stackmap/patchpoint among them).
2. Adds support for stackmap/patchpoint lowering. For the most part, this is
very similar to the support in the AArch64 target, with the obvious differences
(different registers, NOP instructions, etc.). The test cases are adapted
from the AArch64 test cases.
One difference of note is that the patchpoint call sequence takes 24 bytes, so
you can't use less than that (on AArch64 you can go down to 16). Also, as noted
in the docs, we take the patchpoint address to be the actual code address
(assuming the call is local in the TOC-sharing sense), which should yield
higher performance than generating the full cross-DSO indirect-call sequence
and is likely just as useful for JITed code (if not, we'll change it).
StackMaps and Patchpoints are still marked as experimental, and so this support
is doubly experimental. So go ahead and experiment!
llvm-svn: 225909
2015-01-14 09:07:51 +08:00
|
|
|
const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
|
|
|
|
|
2014-04-01 01:48:16 +08:00
|
|
|
// Should we expand the build vector with shuffles?
|
2014-04-29 15:57:37 +08:00
|
|
|
bool
|
2014-04-01 01:48:16 +08:00
|
|
|
shouldExpandBuildVectorWithShuffles(EVT VT,
|
2014-04-29 15:57:37 +08:00
|
|
|
unsigned DefinedValues) const override;
|
2014-04-01 01:48:16 +08:00
|
|
|
|
2013-07-30 08:50:39 +08:00
|
|
|
/// createFastISel - This method returns a target-specific FastISel object,
|
|
|
|
/// or null if the target does not support "fast" instruction selection.
|
2014-04-29 15:57:37 +08:00
|
|
|
FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
|
|
|
|
const TargetLibraryInfo *LibInfo) const override;
|
2013-07-30 08:50:39 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Returns true if an argument of type Ty needs to be passed in a
|
[PowerPC] ELFv2 aggregate passing support
This patch adds infrastructure support for passing array types
directly. These can be used by the front-end to pass aggregate
types (coerced to an appropriate array type). The details of the
array type being used inform the back-end about ABI-relevant
properties. Specifically, the array element type encodes:
- whether the parameter should be passed in FPRs, VRs, or just
GPRs/stack slots (for float / vector / integer element types,
respectively)
- what the alignment requirements of the parameter are when passed in
GPRs/stack slots (8 for float / 16 for vector / the element type
size for integer element types) -- this corresponds to the
"byval align" field
Using the infrastructure provided by this patch, a companion patch
to clang will enable two features:
- In the ELFv2 ABI, pass (and return) "homogeneous" floating-point
or vector aggregates in FPRs and VRs (this is similar to the ARM
homogeneous aggregate ABI)
- As an optimization for both ELFv1 and ELFv2 ABIs, pass aggregates
that fit fully in registers without using the "byval" mechanism
The patch uses the functionArgumentNeedsConsecutiveRegisters callback
to encode that special treatment is required for all directly-passed
array types. The isInConsecutiveRegs / isInConsecutiveRegsLast bits set
as a results are then used to implement the required size and alignment
rules in CalculateStackSlotSize / CalculateStackSlotAlignment etc.
As a related change, the ABI routines have to be modified to support
passing floating-point types in GPRs. This is necessary because with
homogeneous aggregates of 4-byte float type we can now run out of FPRs
*before* we run out of the 64-byte argument save area that is shadowed
by GPRs. Any extra floating-point arguments that no longer fit in FPRs
must now be passed in GPRs until we run out of those too.
Note that there was already code to pass floating-point arguments in
GPRs used with vararg parameters, which was done by writing the argument
out to the argument save area first and then reloading into GPRs. The
patch re-implements this, however, in favor of code packing float arguments
directly via extension/truncation, BITCAST, and BUILD_PAIR operations.
This is required to support the ELFv2 ABI, since we cannot unconditionally
write to the argument save area (which the caller might not have allocated).
The change does, however, affect ELFv1 varags routines too; but even here
the overall effect should be advantageous: Instead of loading the argument
into the FPR, then storing the argument to the stack slot, and finally
reloading the argument from the stack slot into a GPR, the new code now
just loads the argument into the FPR, and subsequently loads the argument
into the GPR (via BITCAST). That BITCAST might imply a save/reload from
a stack temporary (in which case we're no worse than before); but it
might be implemented more efficiently in some cases.
The final part of the patch enables up to 8 FPRs and VRs for argument
return in PPCCallingConv.td; this is required to support returning
ELFv2 homogeneous aggregates. (Note that this doesn't affect other ABIs
since LLVM wil only look for which register to use if the parameter is
marked as "direct" return anyway.)
Reviewed by Hal Finkel.
llvm-svn: 213493
2014-07-21 08:13:26 +08:00
|
|
|
/// contiguous block of registers in calling convention CallConv.
|
|
|
|
/// Returns true if an argument of type \p Ty must be passed in a
/// contiguous block of registers under calling convention \p CallConv.
/// \p CallConv and \p isVarArg are intentionally ignored: the decision
/// depends only on the type.
bool functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
  // We support any array type as "consecutive" block in the parameter
  // save area. The element type defines the alignment requirement and
  // whether the argument should go in GPRs, FPRs, or VRs if available.
  //
  // Note that clang uses this capability both to implement the ELFv2
  // homogeneous float/vector aggregate ABI, and to avoid having to use
  // "byval" when passing aggregates that might fully fit in registers.
  return Ty->isArrayTy();
}
|
|
|
|
|
2015-11-07 09:11:31 +08:00
|
|
|
/// If a physical register, this returns the register that receives the
|
|
|
|
/// exception address on entry to an EH pad.
|
2020-04-08 22:29:30 +08:00
|
|
|
Register
|
2015-11-07 09:11:31 +08:00
|
|
|
getExceptionPointerRegister(const Constant *PersonalityFn) const override;
|
2015-01-07 06:31:02 +08:00
|
|
|
|
2015-11-07 09:11:31 +08:00
|
|
|
/// If a physical register, this returns the register that receives the
|
|
|
|
/// exception typeid on entry to a landing pad.
|
2020-04-08 22:29:30 +08:00
|
|
|
Register
|
2015-11-07 09:11:31 +08:00
|
|
|
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
|
|
|
|
|
2020-05-24 05:46:33 +08:00
|
|
|
/// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a
|
|
|
|
/// specific type is cheaper than a multiply followed by a shift.
|
|
|
|
/// This is true for words and doublewords on 64-bit PowerPC.
|
|
|
|
bool isMulhCheaperThanMulShift(EVT Type) const override;
|
|
|
|
|
2016-04-20 04:14:52 +08:00
|
|
|
/// Override to support customized stack guard loading.
|
|
|
|
bool useLoadStackGuardNode() const override;
|
|
|
|
void insertSSPDeclarations(Module &M) const override;
|
|
|
|
|
2019-03-19 02:40:07 +08:00
|
|
|
bool isFPImmLegal(const APFloat &Imm, EVT VT,
|
|
|
|
bool ForCodeSize) const override;
|
2016-11-16 08:37:30 +08:00
|
|
|
|
|
|
|
unsigned getJumpTableEncoding() const override;
|
|
|
|
bool isJumpTableRelative() const override;
|
|
|
|
SDValue getPICJumpTableRelocBase(SDValue Table,
|
|
|
|
SelectionDAG &DAG) const override;
|
|
|
|
const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
|
|
|
|
unsigned JTI,
|
|
|
|
MCContext &Ctx) const override;
|
|
|
|
|
2020-01-22 04:24:50 +08:00
|
|
|
/// Structure that collects some common arguments that get passed around
|
|
|
|
/// between the functions for call lowering.
|
|
|
|
struct CallFlags {
|
|
|
|
const CallingConv::ID CallConv;
|
|
|
|
const bool IsTailCall : 1;
|
|
|
|
const bool IsVarArg : 1;
|
|
|
|
const bool IsPatchPoint : 1;
|
|
|
|
const bool IsIndirect : 1;
|
|
|
|
const bool HasNest : 1;
|
2020-05-30 03:15:07 +08:00
|
|
|
const bool NoMerge : 1;
|
2020-01-22 04:24:50 +08:00
|
|
|
|
|
|
|
CallFlags(CallingConv::ID CC, bool IsTailCall, bool IsVarArg,
|
2020-05-30 03:15:07 +08:00
|
|
|
bool IsPatchPoint, bool IsIndirect, bool HasNest, bool NoMerge)
|
2020-01-22 04:24:50 +08:00
|
|
|
: CallConv(CC), IsTailCall(IsTailCall), IsVarArg(IsVarArg),
|
|
|
|
IsPatchPoint(IsPatchPoint), IsIndirect(IsIndirect),
|
2020-05-30 03:15:07 +08:00
|
|
|
HasNest(HasNest), NoMerge(NoMerge) {}
|
2020-01-22 04:24:50 +08:00
|
|
|
};
|
|
|
|
|
2015-11-07 09:11:31 +08:00
|
|
|
private:
|
2015-01-07 06:31:02 +08:00
|
|
|
struct ReuseLoadInfo {
|
|
|
|
SDValue Ptr;
|
|
|
|
SDValue Chain;
|
|
|
|
SDValue ResChain;
|
|
|
|
MachinePointerInfo MPI;
|
2017-01-13 08:58:58 +08:00
|
|
|
bool IsDereferenceable = false;
|
|
|
|
bool IsInvariant = false;
|
[Alignment][NFC] Transitionning more getMachineMemOperand call sites
Summary:
This is patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
Reviewers: courbet
Subscribers: arsenm, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, jrtc27, atanasyan, Jim, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D77121
2020-03-31 16:05:00 +08:00
|
|
|
Align Alignment;
|
2015-01-07 06:31:02 +08:00
|
|
|
AAMDNodes AAInfo;
|
2017-01-13 08:58:58 +08:00
|
|
|
const MDNode *Ranges = nullptr;
|
2015-01-07 06:31:02 +08:00
|
|
|
|
2017-01-13 08:58:58 +08:00
|
|
|
ReuseLoadInfo() = default;
|
[CodeGen] Split out the notions of MI invariance and MI dereferenceability.
Summary:
An IR load can be invariant, dereferenceable, neither, or both. But
currently, MI's notion of invariance is IR-invariant &&
IR-dereferenceable.
This patch splits up the notions of invariance and dereferenceability at
the MI level. It's NFC, so adds some probably-unnecessary
"is-dereferenceable" checks, which we can remove later if desired.
Reviewers: chandlerc, tstellarAMD
Subscribers: jholewinski, arsenm, nemanjai, llvm-commits
Differential Revision: https://reviews.llvm.org/D23371
llvm-svn: 281151
2016-09-11 09:38:58 +08:00
|
|
|
|
|
|
|
MachineMemOperand::Flags MMOFlags() const {
|
|
|
|
MachineMemOperand::Flags F = MachineMemOperand::MONone;
|
|
|
|
if (IsDereferenceable)
|
|
|
|
F |= MachineMemOperand::MODereferenceable;
|
|
|
|
if (IsInvariant)
|
|
|
|
F |= MachineMemOperand::MOInvariant;
|
|
|
|
return F;
|
|
|
|
}
|
2015-01-07 06:31:02 +08:00
|
|
|
};
|
|
|
|
|
2018-03-20 02:50:02 +08:00
|
|
|
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
  // PPC treats every pair of address spaces as equivalent, so an
  // addrspacecast never changes the pointer's representation.
  return true;
}
|
|
|
|
|
2015-01-07 06:31:02 +08:00
|
|
|
bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
|
2015-01-09 09:34:30 +08:00
|
|
|
SelectionDAG &DAG,
|
|
|
|
ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
|
2015-01-07 06:31:02 +08:00
|
|
|
void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
|
|
|
|
SelectionDAG &DAG) const;
|
|
|
|
|
|
|
|
void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
|
2016-06-12 23:39:02 +08:00
|
|
|
SelectionDAG &DAG, const SDLoc &dl) const;
|
2015-04-11 18:40:42 +08:00
|
|
|
SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SDLoc &dl) const;
|
2016-12-13 06:09:02 +08:00
|
|
|
|
|
|
|
bool directMoveIsProfitable(const SDValue &Op) const;
|
2015-04-11 18:40:42 +08:00
|
|
|
SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SDLoc &dl) const;
|
2015-01-07 06:31:02 +08:00
|
|
|
|
2018-10-26 11:19:13 +08:00
|
|
|
SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
|
|
|
|
const SDLoc &dl) const;
|
|
|
|
|
2019-02-12 01:29:14 +08:00
|
|
|
SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
|
|
|
|
SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
|
2008-04-30 17:16:33 +08:00
|
|
|
|
2010-01-27 08:07:07 +08:00
|
|
|
bool
|
|
|
|
IsEligibleForTailCallOptimization(SDValue Callee,
|
|
|
|
CallingConv::ID CalleeCC,
|
|
|
|
bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
SelectionDAG& DAG) const;
|
|
|
|
|
2020-04-14 01:44:59 +08:00
|
|
|
bool IsEligibleForTailCallOptimization_64SVR4(
|
|
|
|
SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB,
|
|
|
|
bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
|
2016-04-06 10:04:38 +08:00
|
|
|
|
2016-06-12 23:39:02 +08:00
|
|
|
SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
|
|
|
|
SDValue Chain, SDValue &LROpOut,
|
2016-07-07 09:08:21 +08:00
|
|
|
SDValue &FPOpOut,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SDLoc &dl) const;
|
2010-04-17 23:26:15 +08:00
|
|
|
|
2019-08-14 04:29:01 +08:00
|
|
|
SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;
|
|
|
|
|
2010-04-17 23:26:15 +08:00
|
|
|
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
|
2012-06-05 01:36:38 +08:00
|
|
|
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
|
2010-04-17 23:26:15 +08:00
|
|
|
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
|
2011-09-06 21:37:06 +08:00
|
|
|
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
|
2016-07-07 08:39:27 +08:00
|
|
|
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
|
Add ISD::EH_DWARF_CFA, simplify @llvm.eh.dwarf.cfa on Mips, fix on PowerPC
LLVM has an @llvm.eh.dwarf.cfa intrinsic, used to lower the GCC-compatible
__builtin_dwarf_cfa() builtin. As pointed out in PR26761, this is currently
broken on PowerPC (and likely on ARM as well). Currently, @llvm.eh.dwarf.cfa is
lowered using:
ADD(FRAMEADDR, FRAME_TO_ARGS_OFFSET)
where FRAME_TO_ARGS_OFFSET defaults to the constant zero. On x86,
FRAME_TO_ARGS_OFFSET is lowered to 2*SlotSize. This setup, however, does not
work for PowerPC. Because of the way that the stack layout works, the canonical
frame address is not exactly (FRAMEADDR + FRAME_TO_ARGS_OFFSET) on PowerPC
(there is a lower save-area offset as well), so it is not just a matter of
implementing FRAME_TO_ARGS_OFFSET for PowerPC (unless we redefine its
semantics -- We can do that, since it is currently used only for
@llvm.eh.dwarf.cfa lowering, but it is better to directly lower the CFA construct
itself (since it can be easily represented as a fixed-offset FrameIndex)). Mips
currently does this, but by using a custom lowering for ADD that specifically
recognizes the (FRAMEADDR, FRAME_TO_ARGS_OFFSET) pattern.
This change introduces a ISD::EH_DWARF_CFA node, which by default expands using
the existing logic, but can be directly lowered by the target. Mips is updated
to use this method (which simplifies its implementation, and I suspect makes it
more robust), and updates PowerPC to do the same.
Fixes PR26761.
Differential Revision: https://reviews.llvm.org/D24038
llvm-svn: 280350
2016-09-01 18:28:47 +08:00
|
|
|
SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
|
2010-04-17 23:26:15 +08:00
|
|
|
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
|
2016-06-12 23:39:02 +08:00
|
|
|
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
|
|
|
|
const SDLoc &dl) const;
|
2013-04-02 01:52:07 +08:00
|
|
|
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
|
2010-04-17 23:26:15 +08:00
|
|
|
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
|
2016-09-14 22:19:09 +08:00
|
|
|
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
|
[PowerPC] Add support for the QPX vector instruction set
This adds support for the QPX vector instruction set, which is used by the
enhanced A2 cores on the IBM BG/Q supercomputers. QPX vectors are 256 bytes
wide, holding 4 double-precision floating-point values. Boolean values, modeled
here as <4 x i1> are actually also represented as floating-point values
(essentially { -1, 1 } for { false, true }). QPX shares many features with
Altivec and VSX, but is distinct from both of them. One major difference is
that, instead of adding completely-separate vector registers, QPX vector
registers are extensions of the scalar floating-point registers (lane 0 is the
corresponding scalar floating-point value). The operations supported on QPX
vectors mirrors that supported on the scalar floating-point values (with some
additional ones for permutations and logical/comparison operations).
I've been maintaining this support out-of-tree, as part of the bgclang project,
for several years. This is not the entire bgclang patch set, but is most of the
subset that can be cleanly integrated into LLVM proper at this time. Adding
this to the LLVM backend is part of my efforts to rebase bgclang to the current
LLVM trunk, but is independently useful (especially for codes that use LLVM as
a JIT in library form).
The assembler/disassembler test coverage is complete. The CodeGen test coverage
is not, but I've included some tests, and more will be added as follow-up work.
llvm-svn: 230413
2015-02-25 09:06:45 +08:00
|
|
|
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
|
2010-04-17 23:26:15 +08:00
|
|
|
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
|
2017-05-17 04:18:06 +08:00
|
|
|
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
|
2017-06-13 01:58:42 +08:00
|
|
|
SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
|
[PPC] Use xxbrd to speed up bswap64
Power doesn't have bswap instructions, so llvm generates following code sequence for bswap64.
rotldi 5, 3, 16
rotldi 4, 3, 8
rotldi 9, 3, 24
rotldi 10, 3, 32
rotldi 11, 3, 48
rotldi 12, 3, 56
rldimi 4, 5, 8, 48
rldimi 4, 9, 16, 40
rldimi 4, 10, 24, 32
rldimi 4, 11, 40, 16
rldimi 4, 12, 48, 8
rldimi 4, 3, 56, 0
But Power9 has vector bswap instructions, they can also be used to speed up scalar bswap intrinsic. With this patch, bswap64 can be translated to:
mtvsrdd 34, 3, 3
xxbrd 34, 34
mfvsrld 3, 34
Differential Revision: https://reviews.llvm.org/D39510
llvm-svn: 317499
2017-11-07 03:09:38 +08:00
|
|
|
SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
|
2018-01-12 22:58:41 +08:00
|
|
|
SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
|
2010-04-17 23:26:15 +08:00
|
|
|
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
|
2014-03-30 21:22:59 +08:00
|
|
|
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
|
2010-04-17 23:26:15 +08:00
|
|
|
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
|
2018-12-18 11:16:43 +08:00
|
|
|
SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
|
[PowerPC] custom lower `v2f64 fpext v2f32`
Reduces scalarization overhead via custom lowering of v2f64 fpext v2f32.
eg. For the following IR
%0 = load <2 x float>, <2 x float>* %Ptr, align 8
%1 = fpext <2 x float> %0 to <2 x double>
ret <2 x double> %1
Pre custom lowering:
ld r3, 0(r3)
mtvsrd f0, r3
xxswapd vs34, vs0
xscvspdpn f0, vs0
xxsldwi vs1, vs34, vs34, 3
xscvspdpn f1, vs1
xxmrghd vs34, vs0, vs1
After custom lowering:
lfd f0, 0(r3)
xxmrghw vs0, vs0, vs0
xvcvspdp vs34, vs0
Differential Revision: https://reviews.llvm.org/D57857
llvm-svn: 360429
2019-05-10 22:04:06 +08:00
|
|
|
SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
|
[PowerPC] Add support for the QPX vector instruction set
This adds support for the QPX vector instruction set, which is used by the
enhanced A2 cores on the IBM BG/Q supercomputers. QPX vectors are 256 bytes
wide, holding 4 double-precision floating-point values. Boolean values, modeled
here as <4 x i1> are actually also represented as floating-point values
(essentially { -1, 1 } for { false, true }). QPX shares many features with
Altivec and VSX, but is distinct from both of them. One major difference is
that, instead of adding completely-separate vector registers, QPX vector
registers are extensions of the scalar floating-point registers (lane 0 is the
corresponding scalar floating-point value). The operations supported on QPX
vectors mirrors that supported on the scalar floating-point values (with some
additional ones for permutations and logical/comparison operations).
I've been maintaining this support out-of-tree, as part of the bgclang project,
for several years. This is not the entire bgclang patch set, but is most of the
subset that can be cleanly integrated into LLVM proper at this time. Adding
this to the LLVM backend is part of my efforts to rebase bgclang to the current
LLVM trunk, but is independently useful (especially for codes that use LLVM as
a JIT in library form).
The assembler/disassembler test coverage is complete. The CodeGen test coverage
is not, but I've included some tests, and more will be added as follow-up work.
llvm-svn: 230413
2015-02-25 09:06:45 +08:00
|
|
|
SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
|
2009-09-02 16:44:58 +08:00
|
|
|
CallingConv::ID CallConv, bool isVarArg,
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SDLoc &dl, SelectionDAG &DAG,
|
2010-04-17 23:26:15 +08:00
|
|
|
SmallVectorImpl<SDValue> &InVals) const;
|
2020-01-22 04:24:50 +08:00
|
|
|
|
|
|
|
SDValue FinishCall(CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
|
2016-06-12 23:39:02 +08:00
|
|
|
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
|
[PowerPC] Loosen ELFv1 PPC64 func descriptor loads for indirect calls
Function pointers under PPC64 ELFv1 (which is used on PPC64/Linux on the
POWER7, A2 and earlier cores) are really pointers to a function descriptor, a
structure with three pointers: the actual pointer to the code to which to jump,
the pointer to the TOC needed by the callee, and an environment pointer. We
used to chain these loads, and make them opaque to the rest of the optimizer,
so that they'd always occur directly before the call. This is not necessary,
and in fact, highly suboptimal on embedded cores. Once the function pointer is
known, the loads can be performed ahead of time; in fact, they can be hoisted
out of loops.
Now these function descriptors are almost always generated by the linker, and
thus the contents of the descriptors are invariant. As a result, by default,
we'll mark the associated loads as invariant (allowing them to be hoisted out
of loops). I've added a target feature to turn this off, however, just in case
someone needs that option (constructing an on-stack descriptor, casting it to a
function pointer, and then calling it cannot be well-defined C/C++ code, but I
can imagine some JIT-compilation system doing so).
Consider this simple test:
$ cat call.c
typedef void (*fp)();
void bar(fp x) {
for (int i = 0; i < 1600000000; ++i)
x();
}
$ cat main.c
typedef void (*fp)();
void bar(fp x);
void foo() {}
int main() {
bar(foo);
}
On the PPC A2 (the BG/Q supercomputer), marking the function-descriptor loads
as invariant brings the execution time down to ~8 seconds from ~32 seconds with
the loads in the loop.
The difference on the POWER7 is smaller. Compiling with:
gcc -std=c99 -O3 -mcpu=native call.c main.c : ~6 seconds [this is 4.8.2]
clang -O3 -mcpu=native call.c main.c : ~5.3 seconds
clang -O3 -mcpu=native call.c main.c -mno-invariant-function-descriptors : ~4 seconds
(looks like we'd benefit from additional loop unrolling here, as a first
guess, because this is faster with the extra loads)
The -mno-invariant-function-descriptors will be added to Clang shortly.
llvm-svn: 226207
2015-01-16 05:17:34 +08:00
|
|
|
SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
|
2016-06-12 23:39:02 +08:00
|
|
|
SDValue &Callee, int SPDiff, unsigned NumBytes,
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
[PowerPC] Loosen ELFv1 PPC64 func descriptor loads for indirect calls
Function pointers under PPC64 ELFv1 (which is used on PPC64/Linux on the
POWER7, A2 and earlier cores) are really pointers to a function descriptor, a
structure with three pointers: the actual pointer to the code to which to jump,
the pointer to the TOC needed by the callee, and an environment pointer. We
used to chain these loads, and make them opaque to the rest of the optimizer,
so that they'd always occur directly before the call. This is not necessary,
and in fact, highly suboptimal on embedded cores. Once the function pointer is
known, the loads can be performed ahead of time; in fact, they can be hoisted
out of loops.
Now these function descriptors are almost always generated by the linker, and
thus the contents of the descriptors are invariant. As a result, by default,
we'll mark the associated loads as invariant (allowing them to be hoisted out
of loops). I've added a target feature to turn this off, however, just in case
someone needs that option (constructing an on-stack descriptor, casting it to a
function pointer, and then calling it cannot be well-defined C/C++ code, but I
can imagine some JIT-compilation system doing so).
Consider this simple test:
$ cat call.c
typedef void (*fp)();
void bar(fp x) {
for (int i = 0; i < 1600000000; ++i)
x();
}
$ cat main.c
typedef void (*fp)();
void bar(fp x);
void foo() {}
int main() {
bar(foo);
}
On the PPC A2 (the BG/Q supercomputer), marking the function-descriptor loads
as invariant brings the execution time down to ~8 seconds from ~32 seconds with
the loads in the loop.
The difference on the POWER7 is smaller. Compiling with:
gcc -std=c99 -O3 -mcpu=native call.c main.c : ~6 seconds [this is 4.8.2]
clang -O3 -mcpu=native call.c main.c : ~5.3 seconds
clang -O3 -mcpu=native call.c main.c -mno-invariant-function-descriptors : ~4 seconds
(looks like we'd benefit from additional loop unrolling here, as a first
guess, because this is faster with the extra loads)
The -mno-invariant-function-descriptors will be added to Clang shortly.
llvm-svn: 226207
2015-01-16 05:17:34 +08:00
|
|
|
SmallVectorImpl<SDValue> &InVals,
|
2020-04-14 01:44:59 +08:00
|
|
|
const CallBase *CB) const;
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
|
2014-04-29 15:57:37 +08:00
|
|
|
SDValue
|
2016-06-12 23:39:02 +08:00
|
|
|
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
const SDLoc &dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals) const override;
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
|
2017-01-13 08:58:58 +08:00
|
|
|
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
|
|
|
|
SmallVectorImpl<SDValue> &InVals) const override;
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
|
2017-01-13 08:58:58 +08:00
|
|
|
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
|
|
|
|
bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
LLVMContext &Context) const override;
|
2011-10-15 03:51:36 +08:00
|
|
|
|
2016-06-12 23:39:02 +08:00
|
|
|
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<SDValue> &OutVals,
|
|
|
|
const SDLoc &dl, SelectionDAG &DAG) const override;
|
|
|
|
|
|
|
|
SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
|
|
|
|
SelectionDAG &DAG, SDValue ArgVal,
|
|
|
|
const SDLoc &dl) const;
|
|
|
|
|
2019-11-30 01:44:56 +08:00
|
|
|
SDValue LowerFormalArguments_AIX(
|
|
|
|
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
|
|
|
|
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
|
2016-06-12 23:39:02 +08:00
|
|
|
SDValue LowerFormalArguments_Darwin(
|
|
|
|
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
|
|
|
|
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
|
|
|
|
SDValue LowerFormalArguments_64SVR4(
|
|
|
|
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
|
|
|
|
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
|
|
|
|
SDValue LowerFormalArguments_32SVR4(
|
|
|
|
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
|
|
|
|
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
|
|
|
|
|
|
|
|
SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
|
|
|
|
SDValue CallSeqStart,
|
|
|
|
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
|
|
|
|
const SDLoc &dl) const;
|
|
|
|
|
2020-01-22 04:24:50 +08:00
|
|
|
/// ABI-specific lowering of outgoing calls; one variant per supported ABI.
/// Each lowers the outgoing arguments (\p Outs / \p OutVals), emits the
/// call, and produces the call's results in \p InVals. \p CB is the
/// originating IR call, when available.
/// Lower an outgoing call for the Darwin ABI.
SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee, CallFlags CFlags,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<SDValue> &OutVals,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
const SDLoc &dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals,
|
2020-04-14 01:44:59 +08:00
|
|
|
const CallBase *CB) const;
|
2020-01-22 04:24:50 +08:00
|
|
|
/// Lower an outgoing call for the 64-bit SVR4 ABI.
SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<SDValue> &OutVals,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
const SDLoc &dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals,
|
2020-04-14 01:44:59 +08:00
|
|
|
const CallBase *CB) const;
|
2020-01-22 04:24:50 +08:00
|
|
|
/// Lower an outgoing call for the 32-bit SVR4 ABI.
SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<SDValue> &OutVals,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
const SDLoc &dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals,
|
2020-04-14 01:44:59 +08:00
|
|
|
const CallBase *CB) const;
|
2020-01-22 04:24:50 +08:00
|
|
|
/// Lower an outgoing call for the AIX ABI.
SDValue LowerCall_AIX(SDValue Chain, SDValue Callee, CallFlags CFlags,
|
2019-05-25 04:54:35 +08:00
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<SDValue> &OutVals,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
const SDLoc &dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals,
|
2020-04-14 01:44:59 +08:00
|
|
|
const CallBase *CB) const;
|
2013-03-22 05:37:52 +08:00
|
|
|
|
|
|
|
/// Lower an EH_SJLJ_SETJMP node (setjmp/longjmp-based exception handling).
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
/// Lower an EH_SJLJ_LONGJMP node (setjmp/longjmp-based exception handling).
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
|
2018-07-05 14:21:37 +08:00
|
|
|
/// Custom lowering for ISD::BITCAST nodes.
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
|
2013-04-03 12:01:11 +08:00
|
|
|
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
/// Target DAG combine that removes extensions of truncated boolean values
/// (used by CR-bit tracking of i1 values; see the surrounding combines).
SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
|
2016-07-05 17:22:29 +08:00
|
|
|
/// Target DAG combine for ISD::BUILD_VECTOR nodes.
SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
/// Target DAG combine that removes truncations of extended boolean values
/// (the counterpart of DAGCombineExtBoolTrunc).
SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
|
2018-05-09 01:36:40 +08:00
|
|
|
/// Target DAG combine for stores of FP-to-int conversion results.
SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
|
2015-01-06 14:01:57 +08:00
|
|
|
/// Target DAG combine for FP->int->FP round-trip conversion patterns.
SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
|
2017-05-13 03:25:37 +08:00
|
|
|
/// Target DAG combine for ISD::SHL nodes.
SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
/// Target DAG combine for ISD::SRA nodes.
SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
/// Target DAG combine for ISD::SRL nodes.
SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
|
[PowerPC] Strength reduction of multiply by a constant by shift and add/sub in place
A shift and add/sub sequence combination is faster in place of a multiply by constant.
Because the cycle or latency of multiply is not huge, we only consider such following
worthy patterns.
```
(mul x, 2^N + 1) => (add (shl x, N), x)
(mul x, -(2^N + 1)) => -(add (shl x, N), x)
(mul x, 2^N - 1) => (sub (shl x, N), x)
(mul x, -(2^N - 1)) => (sub x, (shl x, N))
```
And the cycles or latency is subtarget-dependent, so we need to consider the
subtarget to determine whether or not to do such a transformation.
The data type is also considered, since the cycles or latency of a multiply
differ by type.
Differential Revision: https://reviews.llvm.org/D58950
llvm-svn: 357233
2019-03-29 11:08:39 +08:00
|
|
|
SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
|
[PowerPC] Combine ADD to ADDZE
On the ppc64le platform, if ir has the following form,
define i64 @addze1(i64 %x, i64 %z) local_unnamed_addr #0 {
entry:
%cmp = icmp ne i64 %z, CONSTANT (-32767 <= CONSTANT <= 32768)
%conv1 = zext i1 %cmp to i64
%add = add nsw i64 %conv1, %x
ret i64 %add
}
we can optimize it to the form below.
when C == 0
--> addze X, (addic Z, -1))
/
add X, (zext(setne Z, C))--
\ when -32768 <= -C <= 32767 && C != 0
--> addze X, (addic (addi Z, -C), -1)
Patch By: HLJ2009 (Li Jia He)
Differential Revision: https://reviews.llvm.org/D51403
Reviewed By: Nemanjai
llvm-svn: 341634
2018-09-07 15:56:05 +08:00
|
|
|
/// Target DAG combine for ISD::ADD nodes (per the accompanying commit
/// notes, folds add-of-zext-setcc patterns into ADDZE-based sequences).
SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
|
2020-06-04 16:30:17 +08:00
|
|
|
/// Target DAG combine for FMA-like nodes.
SDValue combineFMALike(SDNode *N, DAGCombinerInfo &DCI) const;
|
2018-10-24 01:11:36 +08:00
|
|
|
/// Target DAG combine for ISD::TRUNCATE nodes.
SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
|
2018-10-26 14:48:53 +08:00
|
|
|
/// Target DAG combine for ISD::SETCC nodes.
SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
|
2018-12-18 11:16:43 +08:00
|
|
|
/// Target DAG combine for ISD::ABS nodes.
SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
|
[PowerPC]Exploit P9 vabsdu for unsigned vselect patterns
For type v4i32/v8ii16/v16i8, do following transforms:
(vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) -> (vabsd a, b)
(vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) -> (vabsd a, b)
(vselect (setcc a, b, setult), (sub b, a), (sub a, b)) -> (vabsd a, b)
(vselect (setcc a, b, setule), (sub b, a), (sub a, b)) -> (vabsd a, b)
Differential Revision: https://reviews.llvm.org/D55812
llvm-svn: 349599
2018-12-19 11:04:07 +08:00
|
|
|
/// Target DAG combine for ISD::VSELECT nodes (per the accompanying commit
/// notes, matches unsigned absolute-difference vselect patterns to vabsd).
SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;
|
2019-08-01 13:26:02 +08:00
|
|
|
/// Target DAG combine for a vector-reverse shuffle \p SVN attached to the
/// memory operation \p LSBase. NOTE(review): exact folding inferred from
/// the name -- confirm against the definition.
SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
|
|
|
|
DAGCombinerInfo &DCI) const;
|
2014-09-27 07:01:47 +08:00
|
|
|
|
2016-11-18 18:41:44 +08:00
|
|
|
/// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
|
|
|
|
/// SETCC with integer subtraction when (1) there is a legal way of doing it
|
|
|
|
/// and (2) keeping the result of the comparison in a GPR has a performance
/// benefit.
|
|
|
|
SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
|
2016-11-11 07:31:06 +08:00
|
|
|
/// TargetLowering hook: return a hardware square-root (or reciprocal
/// square-root, when \p Reciprocal is true) estimate for \p Operand,
/// reporting the number of Newton-Raphson refinement steps required.
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
|
|
|
|
int &RefinementSteps, bool &UseOneConstNR,
|
|
|
|
bool Reciprocal) const override;
|
2016-10-21 00:55:45 +08:00
|
|
|
/// TargetLowering hook: return a hardware reciprocal estimate for
/// \p Operand, reporting the number of refinement steps required.
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
|
|
|
|
int &RefinementSteps) const override;
|
2015-07-29 07:05:48 +08:00
|
|
|
/// TargetLowering hook: minimum number of uses of a divisor before
/// repeated divisions are combined into a reciprocal multiply.
unsigned combineRepeatedFPDivisors() const override;
|
2013-08-27 04:11:46 +08:00
|
|
|
|
2016-11-30 07:36:03 +08:00
|
|
|
/// Target DAG combine that merges per-element scalar truncations into a
/// single vector truncation. NOTE(review): inferred from the name --
/// confirm against the definition in PPCISelLowering.cpp.
SDValue
|
2017-01-13 08:58:58 +08:00
|
|
|
combineElementTruncationToVectorTruncation(SDNode *N,
|
|
|
|
DAGCombinerInfo &DCI) const;
|
2017-11-02 02:06:56 +08:00
|
|
|
|
|
|
|
/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
|
|
|
|
/// handled by the VINSERTH instruction introduced in ISA 3.0. This is
|
|
|
|
/// essentially any shuffle of v8i16 vectors that just inserts one element
|
|
|
|
/// from one vector into the other.
/// (ISA 3.0 corresponds to the POWER9 processor family.)
|
|
|
|
SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
|
|
|
|
|
2017-11-07 04:18:30 +08:00
|
|
|
/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
|
|
|
|
/// handled by the VINSERTB instruction introduced in ISA 3.0. This is
|
|
|
|
/// essentially v16i8 vector version of VINSERTH.
/// (ISA 3.0 corresponds to the POWER9 processor family.)
|
|
|
|
SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
|
|
|
|
|
2017-11-16 02:58:27 +08:00
|
|
|
// Return whether the call instruction can potentially be optimized to a
|
|
|
|
// tail call. This will cause the optimizers to attempt to move, or
|
|
|
|
// duplicate return instructions to help enable tail call optimizations.
|
|
|
|
bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
|
2018-10-10 04:35:15 +08:00
|
|
|
/// TargetLowering hook: whether bitwise logic on values of type \p VT can
/// be performed without losing FP semantics (enables logic-op folds on FP
/// bit patterns).
bool hasBitPreservingFPLogic(EVT VT) const override;
|
2018-05-03 07:55:23 +08:00
|
|
|
/// TargetLowering hook: whether folding a mask into an (and x, mask) used
/// only by a compare-against-zero is profitable for \p AndI.
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
|
2017-11-02 02:06:56 +08:00
|
|
|
}; // end class PPCTargetLowering
|
2013-06-13 00:39:22 +08:00
|
|
|
|
2013-07-30 08:50:39 +08:00
|
|
|
namespace PPC {
|
2017-01-13 08:58:58 +08:00
|
|
|
|
|
2013-07-30 08:50:39 +08:00
|
|
|
/// Create a PowerPC-specific FastISel instruction selector for the
/// function described by \p FuncInfo.
FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
|
|
|
|
const TargetLibraryInfo *LibInfo);
|
2017-01-13 08:58:58 +08:00
|
|
|
|
|
|
|
} // end namespace PPC
|
2013-07-30 08:50:39 +08:00
|
|
|
|
2017-07-10 14:32:52 +08:00
|
|
|
/// Return true if the node/operand is a constant that fits in a signed
/// 16-bit immediate; on success the value is returned in \p Imm.
bool isIntS16Immediate(SDNode *N, int16_t &Imm);
|
|
|
|
/// SDValue overload of the above.
bool isIntS16Immediate(SDValue Op, int16_t &Imm);
|
2017-07-08 05:12:35 +08:00
|
|
|
|
2017-01-13 08:58:58 +08:00
|
|
|
} // end namespace llvm
|
|
|
|
|
|
|
|
#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
|