2013-05-07 00:15:19 +08:00
|
|
|
//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2013-05-07 00:15:19 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file defines an instruction selector for the SystemZ target.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "SystemZTargetMachine.h"
|
2019-02-07 02:59:19 +08:00
|
|
|
#include "SystemZISelLowering.h"
|
2013-07-09 17:46:39 +08:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2013-05-07 00:15:19 +08:00
|
|
|
#include "llvm/CodeGen/SelectionDAGISel.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2017-04-28 13:31:46 +08:00
|
|
|
#include "llvm/Support/KnownBits.h"
|
2013-05-07 00:15:19 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 06:55:11 +08:00
|
|
|
#define DEBUG_TYPE "systemz-isel"
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
namespace {
|
|
|
|
// Used to build addressing modes.
|
|
|
|
struct SystemZAddressingMode {
|
|
|
|
// The shape of the address.
|
|
|
|
enum AddrForm {
|
|
|
|
// base+displacement
|
|
|
|
FormBD,
|
|
|
|
|
|
|
|
// base+displacement+index for load and store operands
|
|
|
|
FormBDXNormal,
|
|
|
|
|
|
|
|
// base+displacement+index for load address operands
|
|
|
|
FormBDXLA,
|
|
|
|
|
|
|
|
// base+displacement+index+ADJDYNALLOC
|
|
|
|
FormBDXDynAlloc
|
|
|
|
};
|
|
|
|
AddrForm Form;
|
|
|
|
|
|
|
|
// The type of displacement. The enum names here correspond directly
|
|
|
|
// to the definitions in SystemZOperand.td. We could split them into
|
|
|
|
// flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
|
|
|
|
enum DispRange {
|
|
|
|
Disp12Only,
|
|
|
|
Disp12Pair,
|
|
|
|
Disp20Only,
|
|
|
|
Disp20Only128,
|
|
|
|
Disp20Pair
|
|
|
|
};
|
|
|
|
DispRange DR;
|
|
|
|
|
|
|
|
// The parts of the address. The address is equivalent to:
|
|
|
|
//
|
|
|
|
// Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
|
|
|
|
SDValue Base;
|
|
|
|
int64_t Disp;
|
|
|
|
SDValue Index;
|
|
|
|
bool IncludesDynAlloc;
|
|
|
|
|
|
|
|
SystemZAddressingMode(AddrForm form, DispRange dr)
|
|
|
|
: Form(form), DR(dr), Base(), Disp(0), Index(),
|
|
|
|
IncludesDynAlloc(false) {}
|
|
|
|
|
|
|
|
// True if the address can have an index register.
|
|
|
|
bool hasIndexField() { return Form != FormBD; }
|
|
|
|
|
|
|
|
// True if the address can (and must) include ADJDYNALLOC.
|
|
|
|
bool isDynAlloc() { return Form == FormBDXDynAlloc; }
|
|
|
|
|
2018-10-26 08:02:33 +08:00
|
|
|
void dump(const llvm::SelectionDAG *DAG) {
|
2013-05-07 00:15:19 +08:00
|
|
|
errs() << "SystemZAddressingMode " << this << '\n';
|
|
|
|
|
|
|
|
errs() << " Base ";
|
2014-04-25 13:30:21 +08:00
|
|
|
if (Base.getNode())
|
2018-10-26 08:02:33 +08:00
|
|
|
Base.getNode()->dump(DAG);
|
2013-05-07 00:15:19 +08:00
|
|
|
else
|
|
|
|
errs() << "null\n";
|
|
|
|
|
|
|
|
if (hasIndexField()) {
|
|
|
|
errs() << " Index ";
|
2014-04-25 13:30:21 +08:00
|
|
|
if (Index.getNode())
|
2018-10-26 08:02:33 +08:00
|
|
|
Index.getNode()->dump(DAG);
|
2013-05-07 00:15:19 +08:00
|
|
|
else
|
|
|
|
errs() << "null\n";
|
|
|
|
}
|
|
|
|
|
|
|
|
errs() << " Disp " << Disp;
|
|
|
|
if (IncludesDynAlloc)
|
|
|
|
errs() << " + ADJDYNALLOC";
|
|
|
|
errs() << '\n';
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2013-07-16 19:02:24 +08:00
|
|
|
// Return a mask with Count low bits set.
|
|
|
|
static uint64_t allOnes(unsigned int Count) {
  assert(Count <= 64);
  // A shift by 64 would be undefined behaviour, so handle the full-width
  // mask explicitly.
  return Count > 63 ? UINT64_MAX : (uint64_t(1) << Count) - 1;
}
|
|
|
|
|
2013-07-18 18:40:35 +08:00
|
|
|
// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
|
|
|
|
// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and
|
|
|
|
// Rotate (I5). The combined operand value is effectively:
|
|
|
|
//
|
|
|
|
// (or (rotl Input, Rotate), ~Mask)
|
|
|
|
//
|
|
|
|
// for RNSBG and:
|
|
|
|
//
|
|
|
|
// (and (rotl Input, Rotate), Mask)
|
|
|
|
//
|
2013-10-16 21:35:13 +08:00
|
|
|
// otherwise. The output value has BitSize bits, although Input may be
|
2016-06-23 00:16:27 +08:00
|
|
|
// narrower (in which case the upper bits are don't care), or wider (in which
|
|
|
|
// case the result will be truncated as part of the operation).
|
2013-07-18 17:45:08 +08:00
|
|
|
struct RxSBGOperands {
  // Start with a mask covering all BitSize bits of N and a selected-bit
  // range [Start, End] spanning the low BitSize bits of the 64-bit
  // rotated value, with no rotation applied yet.  Note: BitSize must be
  // initialized before Mask and Start (member declaration order below).
  RxSBGOperands(unsigned Op, SDValue N)
    : Opcode(Op), BitSize(N.getValueSizeInBits()),
      Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
      Rotate(0) {}

  unsigned Opcode;   // The ROTATE AND ... SELECTED BITS opcode being built
  unsigned BitSize;  // Width in bits of the result value
  uint64_t Mask;     // Mask applied to the rotated input
  SDValue Input;     // Operand R2: the value being rotated
  unsigned Start;    // Operand I3: first selected bit position
  unsigned End;      // Operand I4: last selected bit position
  unsigned Rotate;   // Operand I5: left-rotate amount
};
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
// Instruction selector for the SystemZ target.  Converts the legalized
// SelectionDAG into target instructions, with particular support for the
// SystemZ addressing modes and the RxSBG (rotate-and-select) family.
class SystemZDAGToDAGISel : public SelectionDAGISel {
  // Cached per-function subtarget, set in runOnMachineFunction().
  const SystemZSubtarget *Subtarget;

  // Used by SystemZOperands.td to create integer constants.
  inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
    return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
  }

  const SystemZTargetMachine &getTargetMachine() const {
    return static_cast<const SystemZTargetMachine &>(TM);
  }

  const SystemZInstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }

  // Try to fold more of the base or index of AM into AM, where IsBase
  // selects between the base and index.
  bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;

  // Try to describe N in AM, returning true on success.
  bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;

  // Extract individual target operands from matched address AM.
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp) const;
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // Try to match Addr as a FormBD address with displacement type DR.
  // Return true on success, storing the base and displacement in
  // Base and Disp respectively.
  bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                    SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX address with displacement type DR.
  // Return true on success and if the result had no index.  Store the
  // base and displacement in Base and Disp respectively.
  bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX* address of form Form with
  // displacement type DR.  Return true on success, storing the base,
  // displacement and index in Base, Disp and Index respectively.
  bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                     SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // PC-relative address matching routines used by SystemZOperands.td.
  bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
    if (SystemZISD::isPCREL(Addr.getOpcode())) {
      Target = Addr.getOperand(0);
      return true;
    }
    return false;
  }

  // BD matching routines used by SystemZOperands.td.
  bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
  }
  bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
  }
  bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // MVI matching routines used by SystemZOperands.td.
  bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // BDX matching routines used by SystemZOperands.td.
  bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                            SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
                              SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only128,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }

  // Try to match Addr as an address with a base, 12-bit displacement
  // and index, where the index is element Elem of a vector.
  // Return true on success, storing the base, displacement and vector
  // in Base, Disp and Index respectively.
  bool selectBDVAddr12Only(SDValue Addr, SDValue Elem, SDValue &Base,
                           SDValue &Disp, SDValue &Index) const;

  // Check whether (or Op (and X InsertMask)) is effectively an insertion
  // of X into bits InsertMask of some Y != Op.  Return true if so and
  // set Op to that Y.
  bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;

  // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
  // Return true on success.
  bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;

  // Try to fold some of RxSBG.Input into other fields of RxSBG.
  // Return true on success.
  bool expandRxSBG(RxSBGOperands &RxSBG) const;

  // Return an undefined value of type VT.
  SDValue getUNDEF(const SDLoc &DL, EVT VT) const;

  // Convert N to VT, if it isn't already.
  SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;

  // Try to implement AND or shift node N using RISBG with the zero flag set.
  // Return the selected node on success, otherwise return null.
  bool tryRISBGZero(SDNode *N);

  // Try to use RISBG or Opcode to implement OR or XOR node N.
  // Return the selected node on success, otherwise return null.
  bool tryRxSBG(SDNode *N, unsigned Opcode);

  // If Op0 is null, then Node is a constant that can be loaded using:
  //
  //   (Opcode UpperVal LowerVal)
  //
  // If Op0 is nonnull, then Node can be implemented using:
  //
  //   (Opcode (Opcode Op0 UpperVal) LowerVal)
  void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
                           uint64_t UpperVal, uint64_t LowerVal);

  // Try to use gather instruction Opcode to implement vector insertion N.
  bool tryGather(SDNode *N, unsigned Opcode);

  // Try to use scatter instruction Opcode to implement store Store.
  bool tryScatter(StoreSDNode *Store, unsigned Opcode);

  // Change a chain of {load; op; store} of the same value into a simple op
  // through memory of that value, if the uses of the modified value and its
  // address are suitable.
  bool tryFoldLoadStoreIntoMemOperand(SDNode *Node);

  // Return true if Load and Store are loads and stores of the same size
  // and are guaranteed not to overlap.  Such operations can be implemented
  // using block (SS-format) instructions.
  //
  // Partial overlap would lead to incorrect code, since the block operations
  // are logically bytewise, even though they have a fast path for the
  // non-overlapping case.  We also need to avoid full overlap (i.e. two
  // addresses that might be equal at run time) because although that case
  // would be handled correctly, it might be implemented by millicode.
  bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;

  // N is a (store (load Y), X) pattern.  Return true if it can use an MVC
  // from Y to X.
  bool storeLoadCanUseMVC(SDNode *N) const;

  // N is a (store (op (load A[0]), (load A[1])), X) pattern.  Return true
  // if A[1 - I] == X and if N can use a block operation like NC from A[I]
  // to X.
  bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;

  // Try to expand a boolean SELECT_CCMASK using an IPM sequence.
  SDValue expandSelectBoolean(SDNode *Node);

public:
  SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(TM, OptLevel) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<SystemZSubtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  // Override MachineFunctionPass.
  StringRef getPassName() const override {
    return "SystemZ DAG->DAG Pattern Instruction Selection";
  }

  // Override SelectionDAGISel.
  void Select(SDNode *Node) override;
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;
  bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
  void PreprocessISelDAG() override;

  // Include the pieces autogenerated from the target description.
  #include "SystemZGenDAGISel.inc"
};
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
// Factory entry point used by the SystemZ target to create the
// instruction-selection pass.  Ownership of the pass transfers to the
// pass manager.
FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new SystemZDAGToDAGISel(TM, OptLevel);
}
|
|
|
|
|
|
|
|
// Return true if Val should be selected as a displacement for an address
|
|
|
|
// with range DR. Here we're interested in the range of both the instruction
|
|
|
|
// described by DR and of any pairing instruction.
|
|
|
|
// Return true if Val should be selected as a displacement for an address
// with range DR.  Here we're interested in the range of both the instruction
// described by DR and of any pairing instruction.
static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
    // Unsigned 12-bit field.
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp20Only128:
    // The displacement of the high part (Val + 8) must also be in range.
    return isInt<20>(Val) && isInt<20>(Val + 8);

  case SystemZAddressingMode::Disp12Pair:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Pair:
    // Signed 20-bit field.
    return isInt<20>(Val);
  }
  llvm_unreachable("Unhandled displacement range");
}
|
|
|
|
|
|
|
|
// Change the base or index in AM to Value, where IsBase selects
|
|
|
|
// between the base and index.
|
|
|
|
// Change the base or index in AM to Value, where IsBase selects
// between the base and index.
static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
                            SDValue Value) {
  (IsBase ? AM.Base : AM.Index) = Value;
}
|
|
|
|
|
|
|
|
// The base or index of AM is equivalent to Value + ADJDYNALLOC,
|
|
|
|
// where IsBase selects between the base and index. Try to fold the
|
|
|
|
// ADJDYNALLOC into AM.
|
|
|
|
// The base or index of AM is equivalent to Value + ADJDYNALLOC,
// where IsBase selects between the base and index.  Try to fold the
// ADJDYNALLOC into AM.
static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
                              SDValue Value) {
  // Only one ADJDYNALLOC term can be absorbed, and only for forms
  // that require it.
  if (!AM.isDynAlloc() || AM.IncludesDynAlloc)
    return false;
  changeComponent(AM, IsBase, Value);
  AM.IncludesDynAlloc = true;
  return true;
}
|
|
|
|
|
|
|
|
// The base of AM is equivalent to Base + Index. Try to use Index as
|
|
|
|
// the index register.
|
|
|
|
// The base of AM is equivalent to Base + Index.  Try to use Index as
// the index register.
static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
                        SDValue Index) {
  // The address form must permit an index, and the slot must be free.
  if (!AM.hasIndexField() || AM.Index.getNode())
    return false;
  AM.Base = Base;
  AM.Index = Index;
  return true;
}
|
|
|
|
|
|
|
|
// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
|
|
|
|
// between the base and index. Try to fold Op1 into AM's displacement.
|
|
|
|
static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
|
2013-09-27 23:14:04 +08:00
|
|
|
SDValue Op0, uint64_t Op1) {
|
2013-05-07 00:15:19 +08:00
|
|
|
// First try adjusting the displacement.
|
2013-09-27 23:14:04 +08:00
|
|
|
int64_t TestDisp = AM.Disp + Op1;
|
2013-05-07 00:15:19 +08:00
|
|
|
if (selectDisp(AM.DR, TestDisp)) {
|
|
|
|
changeComponent(AM, IsBase, Op0);
|
|
|
|
AM.Disp = TestDisp;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We could consider forcing the displacement into a register and
|
|
|
|
// using it as an index, but it would need to be carefully tuned.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try to fold more of the base or index of AM into AM, where IsBase
// selects the component being expanded.  Returns true if AM was changed.
bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
                                        bool IsBase) const {
  SDValue N = IsBase ? AM.Base : AM.Index;
  unsigned Opcode = N.getOpcode();
  // Look through a truncation of the address component.
  if (Opcode == ISD::TRUNCATE) {
    N = N.getOperand(0);
    Opcode = N.getOpcode();
  }
  if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);

    unsigned Op0Code = Op0->getOpcode();
    unsigned Op1Code = Op1->getOpcode();

    // Fold an ADJDYNALLOC term on either side of the addition.
    if (Op0Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op1);
    if (Op1Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op0);

    // Fold a constant term on either side into the displacement.
    if (Op0Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op1,
                        cast<ConstantSDNode>(Op0)->getSExtValue());
    if (Op1Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op0,
                        cast<ConstantSDNode>(Op1)->getSExtValue());

    // Otherwise try splitting the base addition into base + index.
    if (IsBase && expandIndex(AM, Op0, Op1))
      return true;
  }
  // Fold the offset between a full global address and its aligned anchor
  // into the displacement.
  if (Opcode == SystemZISD::PCREL_OFFSET) {
    SDValue Full = N.getOperand(0);
    SDValue Base = N.getOperand(1);
    SDValue Anchor = Base.getOperand(0);
    uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
                       cast<GlobalAddressSDNode>(Anchor)->getOffset());
    return expandDisp(AM, IsBase, Base, Offset);
  }
  return false;
}
|
|
|
|
|
|
|
|
// Return true if an instruction with displacement range DR should be
|
|
|
|
// used for displacement value Val. selectDisp(DR, Val) must already hold.
|
|
|
|
// Return true if an instruction with displacement range DR should be
// used for displacement value Val.  selectDisp(DR, Val) must already hold.
static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  assert(selectDisp(DR, Val) && "Invalid displacement");
  switch (DR) {
  case SystemZAddressingMode::Disp12Pair:
    // Use the other instruction if the displacement is too large.
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp20Pair:
    // Use the other instruction if the displacement is small enough.
    return !isUInt<12>(Val);

  case SystemZAddressingMode::Disp12Only:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Only128:
    return true;
  }
  llvm_unreachable("Unhandled displacement range");
}
|
|
|
|
|
|
|
|
// Return true if Base + Disp + Index should be performed by LA(Y).
|
|
|
|
// Return true if Base + Disp + Index should be performed by LA(Y).
static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
  // Don't use LA(Y) for constants.
  if (!Base)
    return false;

  // Always use LA(Y) for frame addresses, since we know that the destination
  // register is almost always (perhaps always) going to be different from
  // the frame register.
  if (Base->getOpcode() == ISD::FrameIndex)
    return true;

  if (Disp) {
    // Always use LA(Y) if there is a base, displacement and index.
    // Also always use LA if the displacement is small enough: it should
    // be no worse than AGHI (and better if it avoids a move).  For similar
    // reasons, always use LAY if the constant is too big for AGHI; LAY
    // should be no worse than AGFI.
    if (Index || isUInt<12>(Disp) || !isInt<16>(Disp))
      return true;
  } else {
    // Don't use LA for plain registers.
    // Don't use LA for plain addition if the index operand is only used
    // once.  It should be a natural two-operand addition in that case.
    if (!Index || Index->hasOneUse())
      return false;

    // Prefer addition if the second operation is sign-extended, in the
    // hope of using AGF.
    switch (Index->getOpcode()) {
    case ISD::SIGN_EXTEND:
    case ISD::SIGN_EXTEND_INREG:
      return false;
    default:
      break;
    }
  }

  // Don't use LA for two-operand addition if either operand is only
  // used once.  The addition instructions are better in that case.
  return !Base->hasOneUse();
}
|
|
|
|
|
|
|
|
// Return true if Addr is suitable for AM, updating AM if so.
//
// AM arrives describing only the required shape (Form) and displacement
// range (DR); on success its Base/Index/Disp fields describe how to
// materialize Addr within those constraints.
bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
                                        SystemZAddressingMode &AM) const {
  // Start out assuming that the address will need to be loaded separately,
  // then try to extend it as much as we can.
  AM.Base = Addr;

  // First try treating the address as a constant.
  if (Addr.getOpcode() == ISD::Constant &&
      expandDisp(AM, true, SDValue(),
                 cast<ConstantSDNode>(Addr)->getSExtValue()))
    ;
  // Also see if it's a bare ADJDYNALLOC.
  else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
           expandAdjDynAlloc(AM, true, SDValue()))
    ;
  else
    // Otherwise try expanding each component.  Keep iterating as long as
    // either the base (IsBase = true) or, once present, the index
    // (IsBase = false) can be decomposed further.
    while (expandAddress(AM, true) ||
           (AM.Index.getNode() && expandAddress(AM, false)))
      continue;

  // Reject cases where it isn't profitable to use LA(Y).
  if (AM.Form == SystemZAddressingMode::FormBDXLA &&
      !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
    return false;

  // Reject cases where the other instruction in a pair should be used.
  if (!isValidDisp(AM.DR, AM.Disp))
    return false;

  // Make sure that ADJDYNALLOC is included where necessary.
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
    return false;

  LLVM_DEBUG(AM.dump(CurDAG));
  return true;
}
|
|
|
|
|
|
|
|
// Insert a node into the DAG at least before Pos. This will reposition
|
|
|
|
// the node as needed, and will assign it a node ID that is <= Pos's ID.
|
|
|
|
// Note that this does *not* preserve the uniqueness of node IDs!
|
|
|
|
// The selection DAG must no longer depend on their uniqueness when this
|
|
|
|
// function is used.
|
|
|
|
static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
|
2018-03-23 03:32:07 +08:00
|
|
|
if (N->getNodeId() == -1 ||
|
|
|
|
(SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
|
|
|
|
SelectionDAGISel::getUninvalidatedNodeId(Pos))) {
|
2015-10-20 09:12:46 +08:00
|
|
|
DAG->RepositionNode(Pos->getIterator(), N.getNode());
|
2018-03-20 04:19:46 +08:00
|
|
|
// Mark Node as invalid for pruning as after this it may be a successor to a
|
|
|
|
// selected node but otherwise be in the same position of Pos.
|
|
|
|
// Conservatively mark it with the same -abs(Id) to assure node id
|
|
|
|
// invariant is preserved.
|
2018-03-23 03:32:07 +08:00
|
|
|
N->setNodeId(Pos->getNodeId());
|
|
|
|
SelectionDAGISel::InvalidateNodeId(N.getNode());
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
|
|
|
|
EVT VT, SDValue &Base,
|
2013-09-27 23:14:04 +08:00
|
|
|
SDValue &Disp) const {
|
2013-05-07 00:15:19 +08:00
|
|
|
Base = AM.Base;
|
|
|
|
if (!Base.getNode())
|
|
|
|
// Register 0 means "no base". This is mostly useful for shifts.
|
|
|
|
Base = CurDAG->getRegister(0, VT);
|
|
|
|
else if (Base.getOpcode() == ISD::FrameIndex) {
|
|
|
|
// Lower a FrameIndex to a TargetFrameIndex.
|
|
|
|
int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
|
|
|
|
Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
|
|
|
|
} else if (Base.getValueType() != VT) {
|
|
|
|
// Truncate values from i64 to i32, for shifts.
|
|
|
|
assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
|
|
|
|
"Unexpected truncation");
|
2013-05-25 10:42:55 +08:00
|
|
|
SDLoc DL(Base);
|
2013-05-07 00:15:19 +08:00
|
|
|
SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
|
|
|
|
insertDAGNode(CurDAG, Base.getNode(), Trunc);
|
|
|
|
Base = Trunc;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lower the displacement to a TargetConstant.
|
2015-04-28 22:05:47 +08:00
|
|
|
Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(Base), VT);
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
|
|
|
|
EVT VT, SDValue &Base,
|
2013-09-27 23:14:04 +08:00
|
|
|
SDValue &Disp,
|
|
|
|
SDValue &Index) const {
|
2013-05-07 00:15:19 +08:00
|
|
|
getAddressOperands(AM, VT, Base, Disp);
|
|
|
|
|
|
|
|
Index = AM.Index;
|
|
|
|
if (!Index.getNode())
|
|
|
|
// Register 0 means "no index".
|
|
|
|
Index = CurDAG->getRegister(0, VT);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
|
|
|
|
SDValue Addr, SDValue &Base,
|
2013-09-27 23:14:04 +08:00
|
|
|
SDValue &Disp) const {
|
2013-05-07 00:15:19 +08:00
|
|
|
SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
|
|
|
|
if (!selectAddress(Addr, AM))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
getAddressOperands(AM, Addr.getValueType(), Base, Disp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-08-23 19:18:53 +08:00
|
|
|
bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
|
|
|
|
SDValue Addr, SDValue &Base,
|
2013-09-27 23:14:04 +08:00
|
|
|
SDValue &Disp) const {
|
2013-08-23 19:18:53 +08:00
|
|
|
SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
|
|
|
|
if (!selectAddress(Addr, AM) || AM.Index.getNode())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
getAddressOperands(AM, Addr.getValueType(), Base, Disp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
|
|
|
|
SystemZAddressingMode::DispRange DR,
|
|
|
|
SDValue Addr, SDValue &Base,
|
2013-09-27 23:14:04 +08:00
|
|
|
SDValue &Disp, SDValue &Index) const {
|
2013-05-07 00:15:19 +08:00
|
|
|
SystemZAddressingMode AM(Form, DR);
|
|
|
|
if (!selectAddress(Addr, AM))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
// Try to match Addr as a vector-indexed (BDV) address with a 12-bit
// displacement, where the index is an element of a vector selected by Elem.
bool SystemZDAGToDAGISel::selectBDVAddr12Only(SDValue Addr, SDValue Elem,
                                              SDValue &Base,
                                              SDValue &Disp,
                                              SDValue &Index) const {
  SDValue Regs[2];
  if (!selectBDXAddr12Only(Addr, Regs[0], Disp, Regs[1]) ||
      !Regs[0].getNode() || !Regs[1].getNode())
    return false;

  // Either register could be the vector-derived index; try both roles.
  for (unsigned Attempt = 0; Attempt < 2; ++Attempt) {
    Base = Regs[Attempt];
    Index = Regs[1 - Attempt];
    // We can't tell here whether the index vector has the right type
    // for the access; the caller needs to do that instead.
    if (Index.getOpcode() == ISD::ZERO_EXTEND)
      Index = Index.getOperand(0);
    if (Index.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Index.getOperand(1) == Elem) {
      Index = Index.getOperand(0);
      return true;
    }
  }
  return false;
}
|
|
|
|
|
2013-07-16 19:55:57 +08:00
|
|
|
bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
|
2013-09-27 23:14:04 +08:00
|
|
|
uint64_t InsertMask) const {
|
2013-07-16 19:55:57 +08:00
|
|
|
// We're only interested in cases where the insertion is into some operand
|
|
|
|
// of Op, rather than into Op itself. The only useful case is an AND.
|
|
|
|
if (Op.getOpcode() != ISD::AND)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// We need a constant mask.
|
2014-03-06 19:22:58 +08:00
|
|
|
auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
|
2013-07-16 19:55:57 +08:00
|
|
|
if (!MaskNode)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// It's not an insertion of Op.getOperand(0) if the two masks overlap.
|
|
|
|
uint64_t AndMask = MaskNode->getZExtValue();
|
|
|
|
if (InsertMask & AndMask)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// It's only an insertion if all bits are covered or are known to be zero.
|
|
|
|
// The inner check covers all cases but is more expensive.
|
2016-09-15 00:05:51 +08:00
|
|
|
uint64_t Used = allOnes(Op.getValueSizeInBits());
|
2013-07-16 19:55:57 +08:00
|
|
|
if (Used != (AndMask | InsertMask)) {
|
2018-12-21 22:50:54 +08:00
|
|
|
KnownBits Known = CurDAG->computeKnownBits(Op.getOperand(0));
|
2017-04-28 13:31:46 +08:00
|
|
|
if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
|
2013-07-16 19:55:57 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
Op = Op.getOperand(0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-09-27 23:14:04 +08:00
|
|
|
// Intersect Mask (given in pre-rotation terms) with RxSBG.Mask and accept
// the result if it is still a valid RxSBG wrap-around mask, updating
// RxSBG.Mask/Start/End on success.
bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
                                          uint64_t Mask) const {
  const SystemZInstrInfo *TII = getInstrInfo();
  // Rotate the candidate mask into the same position as RxSBG.Input.
  uint64_t Rotated = Mask;
  if (RxSBG.Rotate != 0)
    Rotated = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
  uint64_t Refined = Rotated & RxSBG.Mask;
  if (!TII->isRxSBGMask(Refined, RxSBG.BitSize, RxSBG.Start, RxSBG.End))
    return false;
  RxSBG.Mask = Refined;
  return true;
}
|
|
|
|
|
2013-11-26 18:53:16 +08:00
|
|
|
// Return true if any bits of (RxSBG.Input & Mask) are significant.
|
|
|
|
static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
|
|
|
|
// Rotate the mask in the same way as RxSBG.Input is rotated.
|
2013-07-18 18:14:55 +08:00
|
|
|
if (RxSBG.Rotate != 0)
|
2013-11-26 18:53:16 +08:00
|
|
|
Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
|
|
|
|
return (Mask & RxSBG.Mask) != 0;
|
2013-07-18 18:14:55 +08:00
|
|
|
}
|
|
|
|
|
2013-09-27 23:14:04 +08:00
|
|
|
// Try to fold one more operation from RxSBG.Input into the RxSBG being
// built, updating RxSBG's Input/Mask/Rotate state.  Returns true if a
// fold succeeded (so the caller should keep iterating) and false when the
// input can be expanded no further.
bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
  SDValue N = RxSBG.Input;
  unsigned Opcode = N.getOpcode();
  switch (Opcode) {
  case ISD::TRUNCATE: {
    // RNSBG can't treat the truncated-away bits as don't-care ones.
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;
    // Model the truncation as an AND with all-ones of the narrow width.
    uint64_t BitSize = N.getValueSizeInBits();
    uint64_t Mask = allOnes(BitSize);
    if (!refineRxSBGMask(RxSBG, Mask))
      return false;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  case ISD::AND: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known zeros, those bits will have
      // been removed from the mask.  See if adding them back in makes the
      // mask suitable.
      KnownBits Known = CurDAG->computeKnownBits(Input);
      Mask |= Known.Zero.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::OR: {
    // Only RNSBG can absorb an OR: its mask selects bits to AND, so
    // OR-with-constant folds as an AND with the inverted constant.
    if (RxSBG.Opcode != SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = ~MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known ones, those bits will have
      // been removed from the mask.  See if adding them back in makes the
      // mask suitable.
      KnownBits Known = CurDAG->computeKnownBits(Input);
      Mask &= ~Known.One.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::ROTL: {
    // Any 64-bit rotate left can be merged into the RxSBG.
    if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
      return false;
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::ANY_EXTEND:
    // Bits above the extended operand are don't-care.
    RxSBG.Input = N.getOperand(0);
    return true;

  case ISD::ZERO_EXTEND:
    if (RxSBG.Opcode != SystemZ::RNSBG) {
      // Restrict the mask to the extended operand.
      unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
      if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
        return false;

      RxSBG.Input = N.getOperand(0);
      return true;
    }
    // For RNSBG, a zero extension behaves like a sign extension whose
    // extension bits must be shown to be don't-care.
    LLVM_FALLTHROUGH;

  case ISD::SIGN_EXTEND: {
    // Check that the extension bits are don't-care (i.e. are masked out
    // by the final mask).
    unsigned BitSize = N.getValueSizeInBits();
    unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
    if (maskMatters(RxSBG, allOnes(BitSize) - allOnes(InnerBitSize))) {
      // In the case where only the sign bit is active, increase Rotate with
      // the extension width.
      if (RxSBG.Mask == 1 && RxSBG.Rotate == 1)
        RxSBG.Rotate += (BitSize - InnerBitSize);
      else
        return false;
    }

    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SHL: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    // Degenerate or out-of-range shift amounts can't be folded.
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG) {
      // Treat (shl X, count) as (rotl X, size-count) as long as the bottom
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count)))
        return false;
    } else {
      // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SRL:
  case ISD::SRA: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    // Degenerate or out-of-range shift amounts can't be folded.
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
      // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
        return false;
    } else {
      // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
      // which is similar to SLL above.
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  default:
    return false;
  }
}
|
|
|
|
|
2016-06-12 23:39:02 +08:00
|
|
|
// Materialize an undefined value of type VT via an IMPLICIT_DEF node.
SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
  return SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT),
                 0);
}
|
|
|
|
|
2016-06-12 23:39:02 +08:00
|
|
|
// Convert N to type VT, inserting into or extracting from the low 32 bits
// of an i64 as needed.  N must already have type VT, i32 or i64.
SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
                                       SDValue N) const {
  EVT FromVT = N.getValueType();
  // Widen i32 to i64 by inserting into the low subreg of an undef i64.
  if (FromVT == MVT::i32 && VT == MVT::i64)
    return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32, DL, VT,
                                         getUNDEF(DL, MVT::i64), N);
  // Narrow i64 to i32 by extracting the low subreg.
  if (FromVT == MVT::i64 && VT == MVT::i32)
    return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
  assert(FromVT == VT && "Unexpected value types");
  return N;
}
|
|
|
|
|
2016-05-11 05:11:26 +08:00
|
|
|
// Try to select N as a RISBG-style "rotate then insert selected bits with
// zeroing" instruction (or, when profitable, as a plain AND).  Returns true
// and replaces N on success.
bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
  // Fold as many operations as possible into the RISBG, counting how many
  // "real" instructions it would save.
  unsigned Count = 0;
  while (expandRxSBG(RISBG))
    // The widening or narrowing is expected to be free.
    // Counting widening or narrowing as a saved operation will result in
    // preferring an R*SBG over a simple shift/logical instruction.
    if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
        RISBG.Input.getOpcode() != ISD::TRUNCATE)
      Count += 1;
  if (Count == 0)
    return false;

  // Prefer to use normal shift instructions over RISBG, since they can handle
  // all cases and are sometimes shorter.
  if (Count == 1 && N->getOpcode() != ISD::AND)
    return false;

  // Prefer register extensions like LLC over RISBG.  Also prefer to start
  // out with normal ANDs if one instruction would be enough.  We can convert
  // these ANDs into an RISBG later if a three-address instruction is useful.
  if (RISBG.Rotate == 0) {
    bool PreferAnd = false;
    // Prefer AND for any 32-bit and-immediate operation.
    if (VT == MVT::i32)
      PreferAnd = true;
    // As well as for any 64-bit operation that can be implemented via LLC(R),
    // LLH(R), LLGT(R), or one of the and-immediate instructions.
    else if (RISBG.Mask == 0xff ||
             RISBG.Mask == 0xffff ||
             RISBG.Mask == 0x7fffffff ||
             SystemZ::isImmLF(~RISBG.Mask) ||
             SystemZ::isImmHF(~RISBG.Mask))
      PreferAnd = true;
    // And likewise for the LLZRGF instruction, which doesn't have a register
    // to register version.
    else if (auto *Load = dyn_cast<LoadSDNode>(RISBG.Input)) {
      if (Load->getMemoryVT() == MVT::i32 &&
          (Load->getExtensionType() == ISD::EXTLOAD ||
           Load->getExtensionType() == ISD::ZEXTLOAD) &&
          RISBG.Mask == 0xffffff00 &&
          Subtarget->hasLoadAndZeroRightmostByte())
        PreferAnd = true;
    }
    if (PreferAnd) {
      // Replace the current node with an AND.  Note that the current node
      // might already be that same AND, in which case it is already CSE'd
      // with it, and we must not call ReplaceNode.
      SDValue In = convertTo(DL, VT, RISBG.Input);
      SDValue Mask = CurDAG->getConstant(RISBG.Mask, DL, VT);
      SDValue New = CurDAG->getNode(ISD::AND, DL, VT, In, Mask);
      if (N != New.getNode()) {
        insertDAGNode(CurDAG, N, Mask);
        insertDAGNode(CurDAG, N, New);
        ReplaceNode(N, New.getNode());
        N = New.getNode();
      }
      // Now, select the machine opcode to implement this operation.
      if (!N->isMachineOpcode())
        SelectCode(N);
      return true;
    }
  }

  unsigned Opcode = SystemZ::RISBG;
  // Prefer RISBGN if available, since it does not clobber CC.
  if (Subtarget->hasMiscellaneousExtensions())
    Opcode = SystemZ::RISBGN;
  EVT OpcodeVT = MVT::i64;
  if (VT == MVT::i32 && Subtarget->hasHighWord() &&
      // We can only use the 32-bit instructions if all source bits are
      // in the low 32 bits without wrapping, both after rotation (because
      // of the smaller range for Start and End) and before rotation
      // (because the input value is truncated).
      RISBG.Start >= 32 && RISBG.End >= RISBG.Start &&
      ((RISBG.Start + RISBG.Rotate) & 63) >= 32 &&
      ((RISBG.End + RISBG.Rotate) & 63) >=
      ((RISBG.Start + RISBG.Rotate) & 63)) {
    Opcode = SystemZ::RISBMux;
    OpcodeVT = MVT::i32;
    RISBG.Start &= 31;
    RISBG.End &= 31;
  }
  // Operands: first-operand value (undef, since zeroing), rotated source,
  // start bit, end bit with the zero-remaining-bits flag (bit 7 = 128) set,
  // and the rotate amount.
  SDValue Ops[5] = {
    getUNDEF(DL, OpcodeVT),
    convertTo(DL, OpcodeVT, RISBG.Input),
    CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}
|
|
|
|
|
2016-05-14 06:42:08 +08:00
|
|
|
// Try to select binary node N (an AND/OR/XOR-like insertion) as the RxSBG
// instruction given by Opcode (e.g. ROSBG/RNSBG/RXSBG).  Returns true and
// replaces N on success.
bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  // Try treating each operand of N as the second operand of the RxSBG
  // and see which goes deepest.
  RxSBGOperands RxSBG[] = {
    RxSBGOperands(Opcode, N->getOperand(0)),
    RxSBGOperands(Opcode, N->getOperand(1))
  };
  unsigned Count[] = { 0, 0 };
  for (unsigned I = 0; I < 2; ++I)
    while (expandRxSBG(RxSBG[I]))
      // The widening or narrowing is expected to be free.
      // Counting widening or narrowing as a saved operation will result in
      // preferring an R*SBG over a simple shift/logical instruction.
      if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
          RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
        Count[I] += 1;

  // Do nothing if neither operand is suitable.
  if (Count[0] == 0 && Count[1] == 0)
    return false;

  // Pick the deepest second operand.
  unsigned I = Count[0] > Count[1] ? 0 : 1;
  SDValue Op0 = N->getOperand(I ^ 1);

  // Prefer IC for character insertions from memory.
  if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
    if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
      if (Load->getMemoryVT() == MVT::i8)
        return false;

  // See whether we can avoid an AND in the first operand by converting
  // ROSBG to RISBG.  (detectOrAndInsertion may rewrite Op0 in place.)
  if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) {
    Opcode = SystemZ::RISBG;
    // Prefer RISBGN if available, since it does not clobber CC.
    if (Subtarget->hasMiscellaneousExtensions())
      Opcode = SystemZ::RISBGN;
  }

  // Operands: first operand (kept bits), rotated second operand, start bit,
  // end bit, and rotate amount.
  SDValue Ops[5] = {
    convertTo(DL, MVT::i64, Op0),
    convertTo(DL, MVT::i64, RxSBG[I].Input),
    CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}
|
|
|
|
|
2016-05-10 07:54:23 +08:00
|
|
|
// Replace Node with `(Opcode Op0?, UpperVal) Opcode LowerVal`, splitting a
// large immediate into an upper and a lower part so each half fits the
// immediate range of the corresponding instruction.  Op0 may be null, in
// which case the upper part starts as a bare constant.
void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
                                              SDValue Op0, uint64_t UpperVal,
                                              uint64_t LowerVal) {
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);
  SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
  if (Op0.getNode())
    Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);

  {
    // When we haven't passed in Op0, Upper will be a constant. In order to
    // prevent folding back to the large immediate in `Or = getNode(...)` we run
    // SelectCode first and end up with an opaque machine node. This means that
    // we need to use a handle to keep track of Upper in case it gets CSE'd by
    // SelectCode.
    //
    // Note that in the case where Op0 is passed in we could just call
    // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
    // the handle at all, but it's fine to do it here.
    //
    // TODO: This is a pretty hacky way to do this. Can we do something that
    // doesn't require a two paragraph explanation?
    HandleSDNode Handle(Upper);
    SelectCode(Upper.getNode());
    Upper = Handle.getValue();
  }

  // Combine the (now selected) upper half with the lower immediate.
  SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
  SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);

  ReplaceNode(Node, Or.getNode());

  SelectCode(Or.getNode());
}
|
|
|
|
|
2016-05-14 06:42:08 +08:00
|
|
|
bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
SDValue ElemV = N->getOperand(2);
|
|
|
|
auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
|
|
|
|
if (!ElemN)
|
2016-05-14 06:42:08 +08:00
|
|
|
return false;
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
|
|
|
|
unsigned Elem = ElemN->getZExtValue();
|
|
|
|
EVT VT = N->getValueType(0);
|
|
|
|
if (Elem >= VT.getVectorNumElements())
|
2016-05-14 06:42:08 +08:00
|
|
|
return false;
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
|
|
|
|
auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
|
2018-12-20 21:01:20 +08:00
|
|
|
if (!Load || !Load->hasNUsesOfValue(1, 0))
|
2016-05-14 06:42:08 +08:00
|
|
|
return false;
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
if (Load->getMemoryVT().getSizeInBits() !=
|
|
|
|
Load->getValueType(0).getSizeInBits())
|
2016-05-14 06:42:08 +08:00
|
|
|
return false;
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
|
|
|
|
SDValue Base, Disp, Index;
|
|
|
|
if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
|
|
|
|
Index.getValueType() != VT.changeVectorElementTypeToInteger())
|
2016-05-14 06:42:08 +08:00
|
|
|
return false;
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
|
|
|
|
SDLoc DL(Load);
|
|
|
|
SDValue Ops[] = {
|
|
|
|
N->getOperand(0), Base, Disp, Index,
|
|
|
|
CurDAG->getTargetConstant(Elem, DL, MVT::i32), Load->getChain()
|
|
|
|
};
|
|
|
|
SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
|
|
|
|
ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
|
2016-05-14 06:42:08 +08:00
|
|
|
ReplaceNode(N, Res);
|
|
|
|
return true;
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
}
|
|
|
|
|
2016-05-14 06:42:08 +08:00
|
|
|
// Try to replace a store of a single extracted vector element with a
// vector scatter-element instruction (Opcode, e.g. VSCEF/VSCEG — confirm
// against the caller in Select()).  Returns true and replaces Store on
// success; returns false (leaving the DAG unchanged) if the pattern or
// addressing mode does not match.
bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
  // The stored value must be an element extracted from a vector.
  SDValue Value = Store->getValue();
  if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return false;
  // Reject truncating stores: the memory size must match the element size.
  if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
    return false;

  // The element index must be a compile-time constant...
  SDValue ElemV = Value.getOperand(1);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  // ...and in range for the vector type.
  SDValue Vec = Value.getOperand(0);
  EVT VT = Vec.getValueType();
  unsigned Elem = ElemN->getZExtValue();
  if (Elem >= VT.getVectorNumElements())
    return false;

  // The address must be representable as base + 12-bit displacement with a
  // vector-element index whose type matches the (integer form of the)
  // vector type.
  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Store);
  SDValue Ops[] = {
    Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
    Store->getChain()
  };
  // The machine node produces only a chain; it replaces the store outright.
  ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
  return true;
}
|
|
|
|
|
2018-05-01 01:54:28 +08:00
|
|
|
// Check whether or not the chain ending in StoreNode is suitable for doing
|
|
|
|
// the {load; op; store} to modify transformation.
|
|
|
|
static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
|
|
|
|
SDValue StoredVal, SelectionDAG *CurDAG,
|
|
|
|
LoadSDNode *&LoadNode,
|
|
|
|
SDValue &InputChain) {
|
|
|
|
// Is the stored value result 0 of the operation?
|
|
|
|
if (StoredVal.getResNo() != 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Are there other uses of the loaded value than the operation?
|
|
|
|
if (!StoredVal.getNode()->hasNUsesOfValue(1, 0))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Is the store non-extending and non-indexed?
|
|
|
|
if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SDValue Load = StoredVal->getOperand(0);
|
|
|
|
// Is the stored value a non-extending and non-indexed load?
|
|
|
|
if (!ISD::isNormalLoad(Load.getNode()))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Return LoadNode by reference.
|
|
|
|
LoadNode = cast<LoadSDNode>(Load);
|
|
|
|
|
|
|
|
// Is store the only read of the loaded value?
|
|
|
|
if (!Load.hasOneUse())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Is the address of the store the same as the load?
|
|
|
|
if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
|
|
|
|
LoadNode->getOffset() != StoreNode->getOffset())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check if the chain is produced by the load or is a TokenFactor with
|
|
|
|
// the load output chain as an operand. Return InputChain by reference.
|
|
|
|
SDValue Chain = StoreNode->getChain();
|
|
|
|
|
|
|
|
bool ChainCheck = false;
|
|
|
|
if (Chain == Load.getValue(1)) {
|
|
|
|
ChainCheck = true;
|
|
|
|
InputChain = LoadNode->getChain();
|
|
|
|
} else if (Chain.getOpcode() == ISD::TokenFactor) {
|
|
|
|
SmallVector<SDValue, 4> ChainOps;
|
|
|
|
for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
|
|
|
|
SDValue Op = Chain.getOperand(i);
|
|
|
|
if (Op == Load.getValue(1)) {
|
|
|
|
ChainCheck = true;
|
|
|
|
// Drop Load, but keep its chain. No cycle check necessary.
|
|
|
|
ChainOps.push_back(Load.getOperand(0));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure using Op as part of the chain would not cause a cycle here.
|
|
|
|
// In theory, we could check whether the chain node is a predecessor of
|
|
|
|
// the load. But that can be very expensive. Instead visit the uses and
|
|
|
|
// make sure they all have smaller node id than the load.
|
|
|
|
int LoadId = LoadNode->getNodeId();
|
|
|
|
for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
|
|
|
|
UE = UI->use_end(); UI != UE; ++UI) {
|
|
|
|
if (UI.getUse().getResNo() != 0)
|
|
|
|
continue;
|
|
|
|
if (UI->getNodeId() > LoadId)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
ChainOps.push_back(Op);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ChainCheck)
|
|
|
|
// Make a new TokenFactor with all the other input chains except
|
|
|
|
// for the load.
|
|
|
|
InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
|
|
|
|
MVT::Other, ChainOps);
|
|
|
|
}
|
|
|
|
if (!ChainCheck)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change a chain of {load; op; store} of the same value into a simple op
|
|
|
|
// through memory of that value, if the uses of the modified value and its
|
|
|
|
// address are suitable.
|
|
|
|
//
|
|
|
|
// The tablegen pattern memory operand pattern is currently not able to match
|
|
|
|
// the case where the CC on the original operation are used.
|
|
|
|
//
|
|
|
|
// See the equivalent routine in X86ISelDAGToDAG for further comments.
|
|
|
|
bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
|
|
|
|
StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
|
|
|
|
SDValue StoredVal = StoreNode->getOperand(1);
|
|
|
|
unsigned Opc = StoredVal->getOpcode();
|
|
|
|
SDLoc DL(StoreNode);
|
|
|
|
|
|
|
|
// Before we try to select anything, make sure this is memory operand size
|
|
|
|
// and opcode we can handle. Note that this must match the code below that
|
|
|
|
// actually lowers the opcodes.
|
|
|
|
EVT MemVT = StoreNode->getMemoryVT();
|
|
|
|
unsigned NewOpc = 0;
|
|
|
|
bool NegateOperand = false;
|
|
|
|
switch (Opc) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case SystemZISD::SSUBO:
|
|
|
|
NegateOperand = true;
|
Fix clang -Wimplicit-fallthrough warnings across llvm, NFC
This patch should not introduce any behavior changes. It consists of
mostly one of two changes:
1. Replacing fall through comments with the LLVM_FALLTHROUGH macro
2. Inserting 'break' before falling through into a case block consisting
of only 'break'.
We were already using this warning with GCC, but its warning behaves
slightly differently. In this patch, the following differences are
relevant:
1. GCC recognizes comments that say "fall through" as annotations, clang
doesn't
2. GCC doesn't warn on "case N: foo(); default: break;", clang does
3. GCC doesn't warn when the case contains a switch, but falls through
the outer case.
I will enable the warning separately in a follow-up patch so that it can
be cleanly reverted if necessary.
Reviewers: alexfh, rsmith, lattner, rtrieu, EricWF, bollu
Differential Revision: https://reviews.llvm.org/D53950
llvm-svn: 345882
2018-11-02 03:54:45 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2018-05-01 01:54:28 +08:00
|
|
|
case SystemZISD::SADDO:
|
|
|
|
if (MemVT == MVT::i32)
|
|
|
|
NewOpc = SystemZ::ASI;
|
|
|
|
else if (MemVT == MVT::i64)
|
|
|
|
NewOpc = SystemZ::AGSI;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
break;
|
|
|
|
case SystemZISD::USUBO:
|
|
|
|
NegateOperand = true;
|
Fix clang -Wimplicit-fallthrough warnings across llvm, NFC
This patch should not introduce any behavior changes. It consists of
mostly one of two changes:
1. Replacing fall through comments with the LLVM_FALLTHROUGH macro
2. Inserting 'break' before falling through into a case block consisting
of only 'break'.
We were already using this warning with GCC, but its warning behaves
slightly differently. In this patch, the following differences are
relevant:
1. GCC recognizes comments that say "fall through" as annotations, clang
doesn't
2. GCC doesn't warn on "case N: foo(); default: break;", clang does
3. GCC doesn't warn when the case contains a switch, but falls through
the outer case.
I will enable the warning separately in a follow-up patch so that it can
be cleanly reverted if necessary.
Reviewers: alexfh, rsmith, lattner, rtrieu, EricWF, bollu
Differential Revision: https://reviews.llvm.org/D53950
llvm-svn: 345882
2018-11-02 03:54:45 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2018-05-01 01:54:28 +08:00
|
|
|
case SystemZISD::UADDO:
|
|
|
|
if (MemVT == MVT::i32)
|
|
|
|
NewOpc = SystemZ::ALSI;
|
|
|
|
else if (MemVT == MVT::i64)
|
|
|
|
NewOpc = SystemZ::ALGSI;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
LoadSDNode *LoadNode = nullptr;
|
|
|
|
SDValue InputChain;
|
|
|
|
if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
|
|
|
|
InputChain))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SDValue Operand = StoredVal.getOperand(1);
|
|
|
|
auto *OperandC = dyn_cast<ConstantSDNode>(Operand);
|
|
|
|
if (!OperandC)
|
|
|
|
return false;
|
|
|
|
auto OperandV = OperandC->getAPIntValue();
|
|
|
|
if (NegateOperand)
|
|
|
|
OperandV = -OperandV;
|
|
|
|
if (OperandV.getMinSignedBits() > 8)
|
|
|
|
return false;
|
|
|
|
Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);
|
|
|
|
|
|
|
|
SDValue Base, Disp;
|
|
|
|
if (!selectBDAddr20Only(StoreNode->getBasePtr(), Base, Disp))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SDValue Ops[] = { Base, Disp, Operand, InputChain };
|
|
|
|
MachineSDNode *Result =
|
|
|
|
CurDAG->getMachineNode(NewOpc, DL, MVT::i32, MVT::Other, Ops);
|
2018-08-15 07:30:32 +08:00
|
|
|
CurDAG->setNodeMemRefs(
|
|
|
|
Result, {StoreNode->getMemOperand(), LoadNode->getMemOperand()});
|
2018-05-01 01:54:28 +08:00
|
|
|
|
|
|
|
ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
|
|
|
|
ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
|
|
|
|
CurDAG->RemoveDeadNode(Node);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-09-27 23:29:20 +08:00
|
|
|
// Return true if it is safe and profitable to implement the given
// Load/Store pair as a single block memory operation (e.g. MVC), which
// the hardware may decompose into smaller accesses.
bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
                                               LoadSDNode *Load) const {
  // Check that the two memory operands have the same size.
  if (Load->getMemoryVT() != Store->getMemoryVT())
    return false;

  // Volatility stops an access from being decomposed.
  if (Load->isVolatile() || Store->isVolatile())
    return false;

  // There's no chance of overlap if the load is invariant.
  if (Load->isInvariant() && Load->isDereferenceable())
    return true;

  // Otherwise we need to check whether there's an alias.
  const Value *V1 = Load->getMemOperand()->getValue();
  const Value *V2 = Store->getMemOperand()->getValue();
  if (!V1 || !V2)
    return false;

  // Reject equality.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  int64_t End1 = Load->getSrcValueOffset() + Size;
  int64_t End2 = Store->getSrcValueOffset() + Size;
  if (V1 == V2 && End1 == End2)
    return false;

  // Note: End1/End2 (offset + size) are used as the location sizes so that
  // each MemoryLocation covers the access from the start of the value.
  return !AA->alias(MemoryLocation(V1, End1, Load->getAAInfo()),
                    MemoryLocation(V2, End2, Store->getAAInfo()));
}
|
|
|
|
|
2013-09-05 18:36:45 +08:00
|
|
|
// Return true if store node N (a store of a load) should be implemented
// with an MVC memory-to-memory copy rather than separate load and store
// instructions.
bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
  auto *Store = cast<StoreSDNode>(N);
  auto *Load = cast<LoadSDNode>(Store->getValue());

  // Prefer not to use MVC if either address can use ... RELATIVE LONG
  // instructions.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  if (Size > 1 && Size <= 8) {
    // Prefer LHRL, LRL and LGRL.
    if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
      return false;
    // Prefer STHRL, STRL and STGRL.
    if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
      return false;
  }

  return canUseBlockOperation(Store, Load);
}
|
|
|
|
|
|
|
|
// Return true if N (a store of a two-operand operation) can be implemented
// as a block binary memory operation, where operand I of the operation is
// the memory operand and operand 1 - I stays in a register/loaded value.
bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
                                                     unsigned I) const {
  auto *StoreA = cast<StoreSDNode>(N);
  auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
  auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
  // LoadA must be decomposable too (non-volatile), and the store must be
  // able to form a block operation with LoadB.
  return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}
|
|
|
|
|
2016-05-14 06:42:08 +08:00
|
|
|
void SystemZDAGToDAGISel::Select(SDNode *Node) {
|
2013-05-07 00:15:19 +08:00
|
|
|
// If we have a custom node, we already have selected!
|
|
|
|
if (Node->isMachineOpcode()) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
|
2013-09-22 16:21:56 +08:00
|
|
|
Node->setNodeId(-1);
|
2016-05-14 06:42:08 +08:00
|
|
|
return;
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
unsigned Opcode = Node->getOpcode();
|
|
|
|
switch (Opcode) {
|
|
|
|
case ISD::OR:
|
2013-07-16 19:55:57 +08:00
|
|
|
if (Node->getOperand(1).getOpcode() != ISD::Constant)
|
2016-05-14 06:42:08 +08:00
|
|
|
if (tryRxSBG(Node, SystemZ::ROSBG))
|
|
|
|
return;
|
2013-07-18 18:06:15 +08:00
|
|
|
goto or_xor;
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
case ISD::XOR:
|
2013-07-18 18:06:15 +08:00
|
|
|
if (Node->getOperand(1).getOpcode() != ISD::Constant)
|
2016-05-14 06:42:08 +08:00
|
|
|
if (tryRxSBG(Node, SystemZ::RXSBG))
|
|
|
|
return;
|
2013-07-18 18:06:15 +08:00
|
|
|
// Fall through.
|
|
|
|
or_xor:
|
2013-05-07 00:15:19 +08:00
|
|
|
// If this is a 64-bit operation in which both 32-bit halves are nonzero,
|
2017-11-15 04:00:34 +08:00
|
|
|
// split the operation into two. If both operands here happen to be
|
|
|
|
// constant, leave this to common code to optimize.
|
|
|
|
if (Node->getValueType(0) == MVT::i64 &&
|
|
|
|
Node->getOperand(0).getOpcode() != ISD::Constant)
|
2014-03-06 19:22:58 +08:00
|
|
|
if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
|
2013-05-07 00:15:19 +08:00
|
|
|
uint64_t Val = Op1->getZExtValue();
|
2016-05-10 07:54:23 +08:00
|
|
|
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
|
|
|
|
splitLargeImmediate(Opcode, Node, Node->getOperand(0),
|
|
|
|
Val - uint32_t(Val), uint32_t(Val));
|
2016-05-14 06:42:08 +08:00
|
|
|
return;
|
2016-05-10 07:54:23 +08:00
|
|
|
}
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2013-07-11 16:59:12 +08:00
|
|
|
case ISD::AND:
|
2013-07-18 18:40:35 +08:00
|
|
|
if (Node->getOperand(1).getOpcode() != ISD::Constant)
|
2016-05-14 06:42:08 +08:00
|
|
|
if (tryRxSBG(Node, SystemZ::RNSBG))
|
|
|
|
return;
|
2016-08-18 04:30:52 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-07-16 19:02:24 +08:00
|
|
|
case ISD::ROTL:
|
|
|
|
case ISD::SHL:
|
|
|
|
case ISD::SRL:
|
2013-12-20 19:49:48 +08:00
|
|
|
case ISD::ZERO_EXTEND:
|
2016-05-14 06:42:08 +08:00
|
|
|
if (tryRISBGZero(Node))
|
|
|
|
return;
|
2013-07-11 16:59:12 +08:00
|
|
|
break;
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
case ISD::Constant:
|
|
|
|
// If this is a 64-bit constant that is out of the range of LLILF,
|
|
|
|
// LLIHF and LGFI, split it into two 32-bit pieces.
|
|
|
|
if (Node->getValueType(0) == MVT::i64) {
|
|
|
|
uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
|
2016-05-10 07:54:23 +08:00
|
|
|
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
|
|
|
|
splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
|
|
|
|
uint32_t(Val));
|
2016-05-14 06:42:08 +08:00
|
|
|
return;
|
2016-05-10 07:54:23 +08:00
|
|
|
}
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2013-07-31 20:38:08 +08:00
|
|
|
case SystemZISD::SELECT_CCMASK: {
|
|
|
|
SDValue Op0 = Node->getOperand(0);
|
|
|
|
SDValue Op1 = Node->getOperand(1);
|
|
|
|
// Prefer to put any load first, so that it can be matched as a
|
2016-11-28 21:34:08 +08:00
|
|
|
// conditional load. Likewise for constants in range for LOCHI.
|
|
|
|
if ((Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) ||
|
|
|
|
(Subtarget->hasLoadStoreOnCond2() &&
|
|
|
|
Node->getValueType(0).isInteger() &&
|
|
|
|
Op1.getOpcode() == ISD::Constant &&
|
|
|
|
isInt<16>(cast<ConstantSDNode>(Op1)->getSExtValue()) &&
|
|
|
|
!(Op0.getOpcode() == ISD::Constant &&
|
|
|
|
isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
|
2013-07-31 20:38:08 +08:00
|
|
|
SDValue CCValid = Node->getOperand(2);
|
|
|
|
SDValue CCMask = Node->getOperand(3);
|
|
|
|
uint64_t ConstCCValid =
|
|
|
|
cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
|
|
|
|
uint64_t ConstCCMask =
|
|
|
|
cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
|
|
|
|
// Invert the condition.
|
2015-04-28 22:05:47 +08:00
|
|
|
CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask, SDLoc(Node),
|
2013-07-31 20:38:08 +08:00
|
|
|
CCMask.getValueType());
|
|
|
|
SDValue Op4 = Node->getOperand(4);
|
2018-05-18 19:54:04 +08:00
|
|
|
SDNode *UpdatedNode =
|
|
|
|
CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
|
|
|
|
if (UpdatedNode != Node) {
|
2018-05-18 20:07:16 +08:00
|
|
|
// In case this node already exists then replace Node with it.
|
2018-05-18 19:54:04 +08:00
|
|
|
ReplaceNode(Node, UpdatedNode);
|
|
|
|
Node = UpdatedNode;
|
|
|
|
}
|
2013-07-31 20:38:08 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
|
|
|
|
case ISD::INSERT_VECTOR_ELT: {
|
|
|
|
EVT VT = Node->getValueType(0);
|
2016-09-15 00:37:15 +08:00
|
|
|
unsigned ElemBitSize = VT.getScalarSizeInBits();
|
2016-05-14 06:42:08 +08:00
|
|
|
if (ElemBitSize == 32) {
|
|
|
|
if (tryGather(Node, SystemZ::VGEF))
|
|
|
|
return;
|
|
|
|
} else if (ElemBitSize == 64) {
|
|
|
|
if (tryGather(Node, SystemZ::VGEG))
|
|
|
|
return;
|
|
|
|
}
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-02-07 02:59:19 +08:00
|
|
|
case ISD::BUILD_VECTOR: {
|
|
|
|
auto *BVN = cast<BuildVectorSDNode>(Node);
|
|
|
|
SDLoc DL(Node);
|
|
|
|
EVT VT = Node->getValueType(0);
|
|
|
|
uint64_t Mask = 0;
|
|
|
|
if (SystemZTargetLowering::tryBuildVectorByteMask(BVN, Mask)) {
|
|
|
|
SDNode *Res = CurDAG->getMachineNode(SystemZ::VGBM, DL, VT,
|
|
|
|
CurDAG->getTargetConstant(Mask, DL, MVT::i32));
|
|
|
|
ReplaceNode(Node, Res);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-02-13 02:06:06 +08:00
|
|
|
case ISD::ConstantFP: {
|
|
|
|
APFloat Imm = cast<ConstantFPSDNode>(Node)->getValueAPF();
|
|
|
|
if (Imm.isZero() || Imm.isNegZero())
|
|
|
|
break;
|
|
|
|
const SystemZInstrInfo *TII = getInstrInfo();
|
|
|
|
EVT VT = Node->getValueType(0);
|
|
|
|
unsigned Start, End;
|
|
|
|
unsigned BitWidth = VT.getSizeInBits();
|
2019-02-13 07:13:18 +08:00
|
|
|
bool Success = SystemZTargetLowering::analyzeFPImm(Imm, BitWidth, Start,
|
|
|
|
End, static_cast<const SystemZInstrInfo *>(TII)); (void)Success;
|
2019-02-13 02:06:06 +08:00
|
|
|
assert(Success && "Expected legal FP immediate");
|
|
|
|
SDLoc DL(Node);
|
|
|
|
unsigned Opcode = (BitWidth == 32 ? SystemZ::VGMF : SystemZ::VGMG);
|
|
|
|
SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT,
|
|
|
|
CurDAG->getTargetConstant(Start, DL, MVT::i32),
|
|
|
|
CurDAG->getTargetConstant(End, DL, MVT::i32));
|
|
|
|
unsigned SubRegIdx = (BitWidth == 32 ? SystemZ::subreg_h32
|
|
|
|
: SystemZ::subreg_h64);
|
|
|
|
Res = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SDValue(Res, 0))
|
|
|
|
.getNode();
|
|
|
|
ReplaceNode(Node, Res);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
case ISD::STORE: {
|
2018-05-01 01:54:28 +08:00
|
|
|
if (tryFoldLoadStoreIntoMemOperand(Node))
|
|
|
|
return;
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
auto *Store = cast<StoreSDNode>(Node);
|
2016-09-15 00:05:51 +08:00
|
|
|
unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
|
2016-05-14 06:42:08 +08:00
|
|
|
if (ElemBitSize == 32) {
|
|
|
|
if (tryScatter(Store, SystemZ::VSCEF))
|
|
|
|
return;
|
|
|
|
} else if (ElemBitSize == 64) {
|
|
|
|
if (tryScatter(Store, SystemZ::VSCEG))
|
|
|
|
return;
|
|
|
|
}
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
break;
|
|
|
|
}
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
|
|
|
|
2016-05-14 06:42:08 +08:00
|
|
|
SelectCode(Node);
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool SystemZDAGToDAGISel::
|
|
|
|
SelectInlineAsmMemoryOperand(const SDValue &Op,
|
2015-03-13 20:45:09 +08:00
|
|
|
unsigned ConstraintID,
|
2013-05-07 00:15:19 +08:00
|
|
|
std::vector<SDValue> &OutOps) {
|
2016-06-13 22:24:05 +08:00
|
|
|
SystemZAddressingMode::AddrForm Form;
|
|
|
|
SystemZAddressingMode::DispRange DispRange;
|
2016-06-09 23:19:16 +08:00
|
|
|
SDValue Base, Disp, Index;
|
|
|
|
|
2015-03-18 00:16:14 +08:00
|
|
|
switch(ConstraintID) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unexpected asm memory constraint");
|
|
|
|
case InlineAsm::Constraint_i:
|
|
|
|
case InlineAsm::Constraint_Q:
|
2016-06-13 22:24:05 +08:00
|
|
|
// Accept an address with a short displacement, but no index.
|
|
|
|
Form = SystemZAddressingMode::FormBD;
|
|
|
|
DispRange = SystemZAddressingMode::Disp12Only;
|
|
|
|
break;
|
2015-03-18 00:16:14 +08:00
|
|
|
case InlineAsm::Constraint_R:
|
2016-06-13 22:24:05 +08:00
|
|
|
// Accept an address with a short displacement and an index.
|
|
|
|
Form = SystemZAddressingMode::FormBDXNormal;
|
|
|
|
DispRange = SystemZAddressingMode::Disp12Only;
|
2015-03-18 00:16:14 +08:00
|
|
|
break;
|
2016-06-09 23:19:16 +08:00
|
|
|
case InlineAsm::Constraint_S:
|
2016-06-13 22:24:05 +08:00
|
|
|
// Accept an address with a long displacement, but no index.
|
|
|
|
Form = SystemZAddressingMode::FormBD;
|
|
|
|
DispRange = SystemZAddressingMode::Disp20Only;
|
|
|
|
break;
|
2016-06-09 23:19:16 +08:00
|
|
|
case InlineAsm::Constraint_T:
|
|
|
|
case InlineAsm::Constraint_m:
|
2017-11-10 00:31:57 +08:00
|
|
|
case InlineAsm::Constraint_o:
|
2016-06-13 22:24:05 +08:00
|
|
|
// Accept an address with a long displacement and an index.
|
|
|
|
// m works the same as T, as this is the most general case.
|
2017-11-10 00:31:57 +08:00
|
|
|
// We don't really have any special handling of "offsettable"
|
|
|
|
// memory addresses, so just treat o the same as m.
|
2016-06-13 22:24:05 +08:00
|
|
|
Form = SystemZAddressingMode::FormBDXNormal;
|
|
|
|
DispRange = SystemZAddressingMode::Disp20Only;
|
2016-06-09 23:19:16 +08:00
|
|
|
break;
|
2015-03-18 00:16:14 +08:00
|
|
|
}
|
2016-06-13 22:24:05 +08:00
|
|
|
|
|
|
|
if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
|
2016-08-19 05:44:15 +08:00
|
|
|
const TargetRegisterClass *TRC =
|
|
|
|
Subtarget->getRegisterInfo()->getPointerRegClass(*MF);
|
|
|
|
SDLoc DL(Base);
|
|
|
|
SDValue RC = CurDAG->getTargetConstant(TRC->getID(), DL, MVT::i32);
|
|
|
|
|
|
|
|
// Make sure that the base address doesn't go into %r0.
|
|
|
|
// If it's a TargetFrameIndex or a fixed register, we shouldn't do anything.
|
|
|
|
if (Base.getOpcode() != ISD::TargetFrameIndex &&
|
|
|
|
Base.getOpcode() != ISD::Register) {
|
|
|
|
Base =
|
|
|
|
SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
|
|
|
|
DL, Base.getValueType(),
|
|
|
|
Base, RC), 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that the index register isn't assigned to %r0 either.
|
|
|
|
if (Index.getOpcode() != ISD::Register) {
|
|
|
|
Index =
|
|
|
|
SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
|
|
|
|
DL, Index.getValueType(),
|
|
|
|
Index, RC), 0);
|
|
|
|
}
|
|
|
|
|
2016-06-13 22:24:05 +08:00
|
|
|
OutOps.push_back(Base);
|
|
|
|
OutOps.push_back(Disp);
|
|
|
|
OutOps.push_back(Index);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-03-18 00:16:14 +08:00
|
|
|
return true;
|
2013-05-07 00:15:19 +08:00
|
|
|
}
|
2018-01-20 04:52:04 +08:00
|
|
|
|
2018-05-01 01:52:32 +08:00
|
|
|
// IsProfitableToFold - Returns true if is profitable to fold the specific
|
|
|
|
// operand node N of U during instruction selection that starts at Root.
|
|
|
|
bool
|
|
|
|
SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
|
|
|
|
SDNode *Root) const {
|
|
|
|
// We want to avoid folding a LOAD into an ICMP node if as a result
|
|
|
|
// we would be forced to spill the condition code into a GPR.
|
|
|
|
if (N.getOpcode() == ISD::LOAD && U->getOpcode() == SystemZISD::ICMP) {
|
|
|
|
if (!N.hasOneUse() || !U->hasOneUse())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The user of the CC value will usually be a CopyToReg into the
|
|
|
|
// physical CC register, which in turn is glued and chained to the
|
|
|
|
// actual instruction that uses the CC value. Bail out if we have
|
|
|
|
// anything else than that.
|
|
|
|
SDNode *CCUser = *U->use_begin();
|
|
|
|
SDNode *CCRegUser = nullptr;
|
|
|
|
if (CCUser->getOpcode() == ISD::CopyToReg ||
|
|
|
|
cast<RegisterSDNode>(CCUser->getOperand(1))->getReg() == SystemZ::CC) {
|
|
|
|
for (auto *U : CCUser->uses()) {
|
|
|
|
if (CCRegUser == nullptr)
|
|
|
|
CCRegUser = U;
|
|
|
|
else if (CCRegUser != U)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (CCRegUser == nullptr)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If the actual instruction is a branch, the only thing that remains to be
|
|
|
|
// checked is whether the CCUser chain is a predecessor of the load.
|
|
|
|
if (CCRegUser->isMachineOpcode() &&
|
|
|
|
CCRegUser->getMachineOpcode() == SystemZ::BRC)
|
|
|
|
return !N->isPredecessorOf(CCUser->getOperand(0).getNode());
|
|
|
|
|
|
|
|
// Otherwise, the instruction may have multiple operands, and we need to
|
|
|
|
// verify that none of them are a predecessor of the load. This is exactly
|
|
|
|
// the same check that would be done by common code if the CC setter were
|
|
|
|
// glued to the CC user, so simply invoke that check here.
|
|
|
|
if (!IsLegalToFold(N, U, CCRegUser, OptLevel, false))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-01-20 04:52:04 +08:00
|
|
|
namespace {
// Describes how to extract a 0/1 value from an IPM result X via the
// expression (((X ^ XORValue) + AddValue) >> Bit).
struct IPMConversion {
  IPMConversion(unsigned XorVal, int64_t AddVal, unsigned BitPos)
      : XORValue(XorVal), AddValue(AddVal), Bit(BitPos) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};
} // end anonymous namespace
|
|
|
|
|
|
|
// Return a sequence for getting a 1 from an IPM result when CC has a
|
|
|
|
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
|
|
|
|
// The handling of CC values outside CCValid doesn't matter.
|
|
|
|
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
|
|
|
|
// Deal with cases where the result can be taken directly from a bit
|
|
|
|
// of the IPM result.
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
|
|
|
|
return IPMConversion(0, 0, SystemZ::IPM_CC);
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
|
|
|
|
return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
|
|
|
|
|
|
|
|
// Deal with cases where we can add a value to force the sign bit
|
|
|
|
// to contain the right value. Putting the bit in 31 means we can
|
|
|
|
// use SRL rather than RISBG(L), and also makes it easier to get a
|
|
|
|
// 0/-1 value, so it has priority over the other tests below.
|
|
|
|
//
|
|
|
|
// These sequences rely on the fact that the upper two bits of the
|
|
|
|
// IPM result are zero.
|
|
|
|
uint64_t TopBit = uint64_t(1) << 31;
|
|
|
|
if (CCMask == (CCValid & SystemZ::CCMASK_0))
|
|
|
|
return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
|
|
|
|
return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_0
|
|
|
|
| SystemZ::CCMASK_1
|
|
|
|
| SystemZ::CCMASK_2)))
|
|
|
|
return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
|
|
|
|
if (CCMask == (CCValid & SystemZ::CCMASK_3))
|
|
|
|
return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_1
|
|
|
|
| SystemZ::CCMASK_2
|
|
|
|
| SystemZ::CCMASK_3)))
|
|
|
|
return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
|
|
|
|
|
|
|
|
// Next try inverting the value and testing a bit. 0/1 could be
|
|
|
|
// handled this way too, but we dealt with that case above.
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
|
|
|
|
return IPMConversion(-1, 0, SystemZ::IPM_CC);
|
|
|
|
|
|
|
|
// Handle cases where adding a value forces a non-sign bit to contain
|
|
|
|
// the right value.
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
|
|
|
|
return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
|
|
|
|
return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
|
|
|
|
|
|
|
|
// The remaining cases are 1, 2, 0/1/3 and 0/2/3. All these are
|
|
|
|
// can be done by inverting the low CC bit and applying one of the
|
|
|
|
// sign-based extractions above.
|
|
|
|
if (CCMask == (CCValid & SystemZ::CCMASK_1))
|
|
|
|
return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
|
|
|
|
if (CCMask == (CCValid & SystemZ::CCMASK_2))
|
|
|
|
return IPMConversion(1 << SystemZ::IPM_CC,
|
|
|
|
TopBit - (3 << SystemZ::IPM_CC), 31);
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_0
|
|
|
|
| SystemZ::CCMASK_1
|
|
|
|
| SystemZ::CCMASK_3)))
|
|
|
|
return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
|
|
|
|
if (CCMask == (CCValid & (SystemZ::CCMASK_0
|
|
|
|
| SystemZ::CCMASK_2
|
|
|
|
| SystemZ::CCMASK_3)))
|
|
|
|
return IPMConversion(1 << SystemZ::IPM_CC,
|
|
|
|
TopBit - (1 << SystemZ::IPM_CC), 31);
|
|
|
|
|
|
|
|
llvm_unreachable("Unexpected CC combination");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Expand a SELECT_CCMASK node that chooses between the constants 1/-1 and 0
// into an explicit IPM-based bit-extraction sequence.  Returns a null
// SDValue when the node does not have that constant-boolean form.
SDValue SystemZDAGToDAGISel::expandSelectBoolean(SDNode *Node) {
  // Operands 0/1 are the "true" and "false" values.  Only handle the case
  // where the false value is 0 and the true value is 1 or -1.
  auto *TrueOp = dyn_cast<ConstantSDNode>(Node->getOperand(0));
  auto *FalseOp = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  if (!TrueOp || !FalseOp)
    return SDValue();
  if (FalseOp->getZExtValue() != 0)
    return SDValue();
  if (TrueOp->getSExtValue() != 1 && TrueOp->getSExtValue() != -1)
    return SDValue();

  // Operands 2/3 are the CCValid/CCMask constants describing the condition.
  auto *CCValidOp = dyn_cast<ConstantSDNode>(Node->getOperand(2));
  auto *CCMaskOp = dyn_cast<ConstantSDNode>(Node->getOperand(3));
  if (!CCValidOp || !CCMaskOp)
    return SDValue();
  int CCValid = CCValidOp->getZExtValue();
  int CCMask = CCMaskOp->getZExtValue();

  SDLoc DL(Node);
  // Operand 4 is the CC input that feeds the IPM node below.
  SDValue CCReg = Node->getOperand(4);
  // Compute the (XOR, ADD, shift) recipe that isolates the condition bit.
  IPMConversion IPM = getIPMConversion(CCValid, CCMask);
  SDValue Result = CurDAG->getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);

  // Apply the XOR step of the recipe, if any.
  if (IPM.XORValue)
    Result = CurDAG->getNode(ISD::XOR, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.XORValue, DL, MVT::i32));

  // Apply the ADD step of the recipe, if any.
  if (IPM.AddValue)
    Result = CurDAG->getNode(ISD::ADD, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.AddValue, DL, MVT::i32));

  EVT VT = Node->getValueType(0);
  if (VT == MVT::i32 && IPM.Bit == 31) {
    // The wanted bit is already the i32 sign bit: a single right shift
    // suffices -- logical for a 0/1 result, arithmetic for 0/-1.
    unsigned ShiftOp = TrueOp->getSExtValue() == 1 ? ISD::SRL : ISD::SRA;
    Result = CurDAG->getNode(ShiftOp, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
  } else {
    // Widen first when the result type is larger than i32.
    if (VT != MVT::i32)
      Result = CurDAG->getNode(ISD::ANY_EXTEND, DL, VT, Result);

    if (TrueOp->getSExtValue() == 1) {
      // The SHR/AND sequence should get optimized to an RISBG.
      Result = CurDAG->getNode(ISD::SRL, DL, VT, Result,
                               CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::AND, DL, VT, Result,
                               CurDAG->getConstant(1, DL, VT));
    } else {
      // Sign-extend from IPM.Bit using a pair of shifts.
      int ShlAmt = VT.getSizeInBits() - 1 - IPM.Bit;
      int SraAmt = VT.getSizeInBits() - 1;
      Result = CurDAG->getNode(ISD::SHL, DL, VT, Result,
                               CurDAG->getConstant(ShlAmt, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::SRA, DL, VT, Result,
                               CurDAG->getConstant(SraAmt, DL, MVT::i32));
    }
  }

  return Result;
}
|
|
|
|
|
|
|
|
void SystemZDAGToDAGISel::PreprocessISelDAG() {
|
2018-01-20 04:56:04 +08:00
|
|
|
// If we have conditional immediate loads, we always prefer
|
|
|
|
// using those over an IPM sequence.
|
|
|
|
if (Subtarget->hasLoadStoreOnCond2())
|
|
|
|
return;
|
|
|
|
|
2018-01-20 04:52:04 +08:00
|
|
|
bool MadeChange = false;
|
|
|
|
|
|
|
|
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
|
|
|
|
E = CurDAG->allnodes_end();
|
|
|
|
I != E;) {
|
|
|
|
SDNode *N = &*I++;
|
|
|
|
if (N->use_empty())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
SDValue Res;
|
|
|
|
switch (N->getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case SystemZISD::SELECT_CCMASK:
|
|
|
|
Res = expandSelectBoolean(N);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Res) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld: ");
|
|
|
|
LLVM_DEBUG(N->dump(CurDAG));
|
|
|
|
LLVM_DEBUG(dbgs() << "\nNew: ");
|
|
|
|
LLVM_DEBUG(Res.getNode()->dump(CurDAG));
|
|
|
|
LLVM_DEBUG(dbgs() << "\n");
|
2018-01-20 04:52:04 +08:00
|
|
|
|
|
|
|
CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
|
|
|
|
MadeChange = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (MadeChange)
|
|
|
|
CurDAG->RemoveDeadNodes();
|
|
|
|
}
|