2012-02-18 20:03:15 +08:00
|
|
|
//===-- PPCSubtarget.h - Define Subtarget for the PPC ----------*- C++ -*--===//
|
2005-08-04 15:12:09 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-08-04 15:12:09 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
2011-07-02 05:01:15 +08:00
|
|
|
// This file declares the PowerPC specific subclass of TargetSubtargetInfo.
|
2005-08-04 15:12:09 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-08-14 00:26:38 +08:00
|
|
|
#ifndef LLVM_LIB_TARGET_POWERPC_PPCSUBTARGET_H
|
|
|
|
#define LLVM_LIB_TARGET_POWERPC_PPCSUBTARGET_H
|
2005-08-04 15:12:09 +08:00
|
|
|
|
2014-06-13 04:54:11 +08:00
|
|
|
#include "PPCFrameLowering.h"
|
2014-06-13 06:50:10 +08:00
|
|
|
#include "PPCISelLowering.h"
|
2015-01-14 19:23:27 +08:00
|
|
|
#include "PPCInstrInfo.h"
|
2011-06-29 09:14:12 +08:00
|
|
|
#include "llvm/ADT/Triple.h"
|
2016-01-28 00:32:26 +08:00
|
|
|
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
|
2014-06-13 05:08:06 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2012-12-04 15:12:27 +08:00
|
|
|
#include "llvm/MC/MCInstrItineraries.h"
|
|
|
|
#include "llvm/Target/TargetSubtargetInfo.h"
|
2005-09-02 05:38:21 +08:00
|
|
|
#include <string>
|
|
|
|
|
2011-07-02 04:45:01 +08:00
|
|
|
#define GET_SUBTARGETINFO_HEADER
|
2011-07-02 06:36:09 +08:00
|
|
|
#include "PPCGenSubtargetInfo.inc"
|
2011-07-02 04:45:01 +08:00
|
|
|
|
2007-01-19 12:36:02 +08:00
|
|
|
// GCC #defines PPC on Linux but we use it as our namespace name
|
|
|
|
#undef PPC
|
|
|
|
|
2005-08-04 15:12:09 +08:00
|
|
|
namespace llvm {
|
2011-07-07 15:07:08 +08:00
|
|
|
class StringRef;
|
2006-12-13 04:57:08 +08:00
|
|
|
|
|
|
|
namespace PPC {
  // -m directive values.
  //
  // One enumerator per CPU/directive the backend recognizes; the selected
  // value is stored in PPCSubtarget::DarwinDirective and reported through
  // getDarwinDirective().  Values are implicit (DIR_NONE == 0), so the
  // enumerator order below must not be changed.
  enum {
    DIR_NONE,     // No directive specified.
    DIR_32,       // Generic 32-bit.
    DIR_440,      // PPC 440 (Book E).
    DIR_601,      // PPC 601.
    DIR_602,      // PPC 602.
    DIR_603,      // PPC 603.
    DIR_7400,     // PPC 7400 (G4).
    DIR_750,      // PPC 750 (G3).
    DIR_970,      // PPC 970 (G5).
    DIR_A2,       // PPC A2.
    DIR_E500mc,   // Freescale e500mc.
    DIR_E5500,    // Freescale e5500.
    DIR_PWR3,     // POWER3.
    DIR_PWR4,     // POWER4.
    DIR_PWR5,     // POWER5.
    DIR_PWR5X,    // POWER5+.
    DIR_PWR6,     // POWER6.
    DIR_PWR6X,    // POWER6X.
    DIR_PWR7,     // POWER7.
    DIR_PWR8,     // POWER8.
    DIR_PWR9,     // POWER9.
    DIR_64        // Generic 64-bit.
  };
}
|
2006-12-13 04:57:08 +08:00
|
|
|
|
2006-12-12 07:22:45 +08:00
|
|
|
class GlobalValue;
|
|
|
|
class TargetMachine;
|
2012-10-29 23:51:35 +08:00
|
|
|
|
2011-07-02 04:45:01 +08:00
|
|
|
class PPCSubtarget : public PPCGenSubtargetInfo {
|
2016-03-29 09:36:01 +08:00
|
|
|
public:
|
|
|
|
/// Availability of the popcntd (population-count doubleword) facility.
/// Three states: unavailable, available but slow, and fast.
/// NOTE(review): the slow/fast split presumably governs whether the ISel
/// prefers the instruction over a software expansion — confirm against the
/// users of HasPOPCNTD.
enum POPCNTDKind {
  POPCNTD_Unavailable,
  POPCNTD_Slow,
  POPCNTD_Fast
};
|
|
|
|
|
2005-08-04 15:12:09 +08:00
|
|
|
protected:
|
2014-08-09 12:38:56 +08:00
|
|
|
/// TargetTriple - What processor and OS we're targeting.
|
|
|
|
Triple TargetTriple;
|
|
|
|
|
2005-08-04 15:12:09 +08:00
|
|
|
/// stackAlignment - The minimum alignment known to hold of the stack frame on
|
|
|
|
/// entry to the function and which must be maintained by every function.
|
2005-08-06 06:05:03 +08:00
|
|
|
unsigned StackAlignment;
|
2012-10-29 23:51:35 +08:00
|
|
|
|
2005-11-02 04:06:59 +08:00
|
|
|
/// Selected instruction itineraries (one entry per itinerary class.)
|
|
|
|
InstrItineraryData InstrItins;
|
2012-10-29 23:51:35 +08:00
|
|
|
|
2006-12-13 04:57:08 +08:00
|
|
|
/// Which cpu directive was used.
|
|
|
|
unsigned DarwinDirective;
|
2005-08-04 15:12:09 +08:00
|
|
|
|
|
|
|
/// Used by the ISel to turn in optimizations for POWER4-derived architectures
|
2012-06-12 03:57:01 +08:00
|
|
|
bool HasMFOCRF;
|
2006-06-17 01:34:12 +08:00
|
|
|
bool Has64BitSupport;
|
|
|
|
bool Use64BitRegs;
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
bool UseCRBits;
|
[PowerPC] Refactor soft-float support, and enable PPC64 soft float
This change enables soft-float for PowerPC64, and also makes soft-float disable
all vector instruction sets for both 32-bit and 64-bit modes. This latter part
is necessary because the PPC backend canonicalizes many Altivec vector types to
floating-point types, and so soft-float breaks scalarization support for many
operations. Both for embedded targets and for operating-system kernels desiring
soft-float support, it seems reasonable that disabling hardware floating-point
also disables vector instructions (embedded targets without hardware floating
point support are unlikely to have Altivec, etc. and operating system kernels
desiring not to use floating-point registers to lower syscall cost are unlikely
to want to use vector registers either). If someone needs this to work, we'll
need to change the fact that we promote many Altivec operations to act on
v4f32. To make it possible to disable Altivec when soft-float is enabled,
hardware floating-point support needs to be expressed as a positive feature,
like the others, and not a negative feature, because target features cannot
have dependencies on the disabling of some other feature. So +soft-float has
now become -hard-float.
Fixes PR26970.
llvm-svn: 283060
2016-10-02 10:10:20 +08:00
|
|
|
bool HasHardFloat;
|
2006-06-17 01:50:12 +08:00
|
|
|
bool IsPPC64;
|
2005-10-27 01:30:34 +08:00
|
|
|
bool HasAltivec;
|
2014-08-07 20:18:21 +08:00
|
|
|
bool HasSPE;
|
2013-01-31 05:17:42 +08:00
|
|
|
bool HasQPX;
|
2013-10-17 04:38:58 +08:00
|
|
|
bool HasVSX;
|
2014-10-11 01:21:15 +08:00
|
|
|
bool HasP8Vector;
|
2015-02-04 05:58:23 +08:00
|
|
|
bool HasP8Altivec;
|
2015-03-05 04:44:33 +08:00
|
|
|
bool HasP8Crypto;
|
2016-02-27 05:11:55 +08:00
|
|
|
bool HasP9Vector;
|
|
|
|
bool HasP9Altivec;
|
2013-08-19 13:01:02 +08:00
|
|
|
bool HasFCPSGN;
|
2005-09-03 02:33:05 +08:00
|
|
|
bool HasFSQRT;
|
2013-04-03 12:01:11 +08:00
|
|
|
bool HasFRE, HasFRES, HasFRSQRTE, HasFRSQRTES;
|
|
|
|
bool HasRecipPrec;
|
2006-02-28 15:08:22 +08:00
|
|
|
bool HasSTFIWX;
|
2013-03-31 18:12:51 +08:00
|
|
|
bool HasLFIWAX;
|
2013-03-29 16:57:48 +08:00
|
|
|
bool HasFPRND;
|
2013-04-02 01:52:07 +08:00
|
|
|
bool HasFPCVT;
|
2012-06-23 07:10:08 +08:00
|
|
|
bool HasISEL;
|
2015-04-10 07:54:37 +08:00
|
|
|
bool HasBPERMD;
|
|
|
|
bool HasExtDiv;
|
2015-01-03 09:16:37 +08:00
|
|
|
bool HasCMPB;
|
2013-03-29 03:25:55 +08:00
|
|
|
bool HasLDBRX;
|
2011-10-17 12:03:49 +08:00
|
|
|
bool IsBookE;
|
2014-10-03 06:34:22 +08:00
|
|
|
bool HasOnlyMSYNC;
|
2014-08-04 23:47:38 +08:00
|
|
|
bool IsE500;
|
|
|
|
bool IsPPC4xx;
|
2014-08-05 01:07:41 +08:00
|
|
|
bool IsPPC6xx;
|
2015-06-17 00:01:15 +08:00
|
|
|
bool FeatureMFTB;
|
2013-09-12 22:40:06 +08:00
|
|
|
bool DeprecatedDST;
|
2006-12-12 07:22:45 +08:00
|
|
|
bool HasLazyResolverStubs;
|
2013-07-26 09:35:43 +08:00
|
|
|
bool IsLittleEndian;
|
2015-01-15 04:17:10 +08:00
|
|
|
bool HasICBT;
|
[PowerPC] Loosen ELFv1 PPC64 func descriptor loads for indirect calls
Function pointers under PPC64 ELFv1 (which is used on PPC64/Linux on the
POWER7, A2 and earlier cores) are really pointers to a function descriptor, a
structure with three pointers: the actual pointer to the code to which to jump,
the pointer to the TOC needed by the callee, and an environment pointer. We
used to chain these loads, and make them opaque to the rest of the optimizer,
so that they'd always occur directly before the call. This is not necessary,
and in fact, highly suboptimal on embedded cores. Once the function pointer is
known, the loads can be performed ahead of time; in fact, they can be hoisted
out of loops.
Now these function descriptors are almost always generated by the linker, and
thus the contents of the descriptors are invariant. As a result, by default,
we'll mark the associated loads as invariant (allowing them to be hoisted out
of loops). I've added a target feature to turn this off, however, just in case
someone needs that option (constructing an on-stack descriptor, casting it to a
function pointer, and then calling it cannot be well-defined C/C++ code, but I
can imagine some JIT-compilation system doing so).
Consider this simple test:
$ cat call.c
typedef void (*fp)();
void bar(fp x) {
for (int i = 0; i < 1600000000; ++i)
x();
}
$ cat main.c
typedef void (*fp)();
void bar(fp x);
void foo() {}
int main() {
bar(foo);
}
On the PPC A2 (the BG/Q supercomputer), marking the function-descriptor loads
as invariant brings the execution time down to ~8 seconds from ~32 seconds with
the loads in the loop.
The difference on the POWER7 is smaller. Compiling with:
gcc -std=c99 -O3 -mcpu=native call.c main.c : ~6 seconds [this is 4.8.2]
clang -O3 -mcpu=native call.c main.c : ~5.3 seconds
clang -O3 -mcpu=native call.c main.c -mno-invariant-function-descriptors : ~4 seconds
(looks like we'd benefit from additional loop unrolling here, as a first
guess, because this is faster with the extra loads)
The -mno-invariant-function-descriptors will be added to Clang shortly.
llvm-svn: 226207
2015-01-16 05:17:34 +08:00
|
|
|
bool HasInvariantFunctionDescriptors;
|
2015-03-11 04:51:07 +08:00
|
|
|
bool HasPartwordAtomics;
|
2015-04-11 18:40:42 +08:00
|
|
|
bool HasDirectMove;
|
2015-03-26 03:36:23 +08:00
|
|
|
bool HasHTM;
|
2015-11-21 06:38:20 +08:00
|
|
|
bool HasFusion;
|
2015-12-15 20:19:34 +08:00
|
|
|
bool HasFloat128;
|
2016-03-31 23:26:37 +08:00
|
|
|
bool IsISA3_0;
|
2016-08-30 08:59:23 +08:00
|
|
|
bool UseLongCalls;
|
2016-03-29 09:36:01 +08:00
|
|
|
|
|
|
|
POPCNTDKind HasPOPCNTD;
|
2012-10-29 23:51:35 +08:00
|
|
|
|
[PowerPC] Add support for the QPX vector instruction set
This adds support for the QPX vector instruction set, which is used by the
enhanced A2 cores on the IBM BG/Q supercomputers. QPX vectors are 256 bytes
wide, holding 4 double-precision floating-point values. Boolean values, modeled
here as <4 x i1> are actually also represented as floating-point values
(essentially { -1, 1 } for { false, true }). QPX shares many features with
Altivec and VSX, but is distinct from both of them. One major difference is
that, instead of adding completely-separate vector registers, QPX vector
registers are extensions of the scalar floating-point registers (lane 0 is the
corresponding scalar floating-point value). The operations supported on QPX
vectors mirrors that supported on the scalar floating-point values (with some
additional ones for permutations and logical/comparison operations).
I've been maintaining this support out-of-tree, as part of the bgclang project,
for several years. This is not the entire bgclang patch set, but is most of the
subset that can be cleanly integrated into LLVM proper at this time. Adding
this to the LLVM backend is part of my efforts to rebase bgclang to the current
LLVM trunk, but is independently useful (especially for codes that use LLVM as
a JIT in library form).
The assembler/disassembler test coverage is complete. The CodeGen test coverage
is not, but I've included some tests, and more will be added as follow-up work.
llvm-svn: 230413
2015-02-25 09:06:45 +08:00
|
|
|
/// When targeting QPX running a stock PPC64 Linux kernel where the stack
|
|
|
|
/// alignment has not been changed, we need to keep the 16-byte alignment
|
|
|
|
/// of the stack.
|
|
|
|
bool IsQPXStackUnaligned;
|
|
|
|
|
2015-02-14 06:48:51 +08:00
|
|
|
const PPCTargetMachine &TM;
|
2014-06-13 06:28:06 +08:00
|
|
|
PPCFrameLowering FrameLowering;
|
2014-06-13 06:05:46 +08:00
|
|
|
PPCInstrInfo InstrInfo;
|
2014-06-13 06:50:10 +08:00
|
|
|
PPCTargetLowering TLInfo;
|
2016-01-28 00:32:26 +08:00
|
|
|
SelectionDAGTargetInfo TSInfo;
|
2014-06-13 05:08:06 +08:00
|
|
|
|
2005-08-04 15:12:09 +08:00
|
|
|
public:
|
|
|
|
/// This constructor initializes the data members to match that
|
2009-08-03 06:11:08 +08:00
|
|
|
/// of the specified triple.
|
2005-08-04 15:12:09 +08:00
|
|
|
///
|
2015-06-10 20:11:26 +08:00
|
|
|
PPCSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
|
|
|
|
const PPCTargetMachine &TM);
|
2012-10-29 23:51:35 +08:00
|
|
|
|
|
|
|
/// ParseSubtargetFeatures - Parses features string setting specified
|
2005-10-27 02:07:50 +08:00
|
|
|
/// subtarget options. Definition of function is auto generated by tblgen.
|
2011-07-07 15:07:08 +08:00
|
|
|
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
|
2012-10-29 23:51:35 +08:00
|
|
|
|
2005-08-04 15:12:09 +08:00
|
|
|
/// getStackAlignment - Returns the minimum alignment known to hold of the
/// stack frame on entry to the function and which must be maintained by every
/// function for this subtarget.
/// (Alignment is in bytes, matching the 16/32-byte platform values used by
/// getPlatformStackAlignment().)
unsigned getStackAlignment() const { return StackAlignment; }
|
2012-10-29 23:51:35 +08:00
|
|
|
|
2006-12-13 04:57:08 +08:00
|
|
|
/// getDarwinDirective - Returns the -m directive specified for the cpu,
/// one of the PPC::DIR_* enumerators.
unsigned getDarwinDirective() const { return DarwinDirective; }
|
2012-10-29 23:51:35 +08:00
|
|
|
|
2014-06-14 06:38:48 +08:00
|
|
|
/// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.  The returned pointer refers to the member object and remains
/// valid for the lifetime of this subtarget.
const InstrItineraryData *getInstrItineraryData() const override {
  return &InstrItins;
}
|
|
|
|
|
|
|
|
// Accessors for the per-function target components owned by this subtarget.
// Each returns a pointer/reference to a member object whose lifetime is
// tied to the subtarget itself.
const PPCFrameLowering *getFrameLowering() const override {
  return &FrameLowering;
}
const PPCInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const PPCTargetLowering *getTargetLowering() const override {
  return &TLInfo;
}
const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
  return &TSInfo;
}
// The register info object is owned by the instruction info, not stored
// directly here.
const PPCRegisterInfo *getRegisterInfo() const override {
  return &getInstrInfo()->getRegisterInfo();
}
const PPCTargetMachine &getTargetMachine() const { return TM; }
|
2014-06-13 04:54:11 +08:00
|
|
|
|
|
|
|
/// initializeSubtargetDependencies - Initializes using a CPU and feature string
|
|
|
|
/// so that we can use initializer lists for subtarget initialization.
|
|
|
|
PPCSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
|
|
|
|
|
2013-07-16 06:29:40 +08:00
|
|
|
private:
|
|
|
|
void initializeEnvironment();
|
2014-09-04 04:36:31 +08:00
|
|
|
void initSubtargetFeatures(StringRef CPU, StringRef FS);
|
2013-07-16 06:29:40 +08:00
|
|
|
|
|
|
|
public:
|
2006-06-17 01:50:12 +08:00
|
|
|
/// isPPC64 - Return true if we are generating code for 64-bit pointer mode.
|
|
|
|
///
|
2015-02-17 14:45:15 +08:00
|
|
|
bool isPPC64() const;
|
2012-10-29 23:51:35 +08:00
|
|
|
|
2006-06-17 01:50:12 +08:00
|
|
|
/// has64BitSupport - Return true if the selected CPU supports 64-bit
/// instructions, regardless of whether we are in 32-bit or 64-bit mode.
bool has64BitSupport() const { return Has64BitSupport; }

// useSoftFloat - Return true if soft-float option is turned on.
// Hardware floating point is modeled as the positive feature HasHardFloat,
// so soft-float is simply its negation.
bool useSoftFloat() const { return !HasHardFloat; }

/// use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit
/// registers in 32-bit mode when possible. This can only be true if
/// has64BitSupport() returns true.
bool use64BitRegs() const { return Use64BitRegs; }
|
2012-10-29 23:51:35 +08:00
|
|
|
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
/// useCRBits - Return true if we should store and manipulate i1 values in
/// the individual condition register bits rather than promoting them to
/// GPRs.
bool useCRBits() const { return UseCRBits; }
|
|
|
|
|
2006-12-12 07:22:45 +08:00
|
|
|
/// hasLazyResolverStub - Return true if accesses to the specified global have
/// to go through a dyld lazy resolution stub. This means that an extra load
/// is required to get the address of the global.
bool hasLazyResolverStub(const GlobalValue *GV) const;

// isLittleEndian - True if generating little-endian code.
bool isLittleEndian() const { return IsLittleEndian; }
|
|
|
|
|
2006-06-17 01:50:12 +08:00
|
|
|
// Specific obvious features.  Each accessor simply reports the corresponding
// feature flag set during subtarget feature initialization.

// Scalar floating-point instruction availability.
bool hasFCPSGN() const { return HasFCPSGN; }
bool hasFSQRT() const { return HasFSQRT; }
bool hasFRE() const { return HasFRE; }
bool hasFRES() const { return HasFRES; }
bool hasFRSQRTE() const { return HasFRSQRTE; }
bool hasFRSQRTES() const { return HasFRSQRTES; }
bool hasRecipPrec() const { return HasRecipPrec; }
bool hasSTFIWX() const { return HasSTFIWX; }
bool hasLFIWAX() const { return HasLFIWAX; }
bool hasFPRND() const { return HasFPRND; }
bool hasFPCVT() const { return HasFPCVT; }

// Vector facilities (Altivec, SPE, QPX, VSX, and the Power8/Power9
// extensions).
bool hasAltivec() const { return HasAltivec; }
bool hasSPE() const { return HasSPE; }
bool hasQPX() const { return HasQPX; }
bool hasVSX() const { return HasVSX; }
bool hasP8Vector() const { return HasP8Vector; }
bool hasP8Altivec() const { return HasP8Altivec; }
bool hasP8Crypto() const { return HasP8Crypto; }
bool hasP9Vector() const { return HasP9Vector; }
bool hasP9Altivec() const { return HasP9Altivec; }

// Miscellaneous instruction availability.
bool hasMFOCRF() const { return HasMFOCRF; }
bool hasISEL() const { return HasISEL; }
bool hasBPERMD() const { return HasBPERMD; }
bool hasExtDiv() const { return HasExtDiv; }
bool hasCMPB() const { return HasCMPB; }
bool hasLDBRX() const { return HasLDBRX; }

// Core-family / microarchitecture classification.
bool isBookE() const { return IsBookE; }
bool hasOnlyMSYNC() const { return HasOnlyMSYNC; }
bool isPPC4xx() const { return IsPPC4xx; }
bool isPPC6xx() const { return IsPPC6xx; }
bool isE500() const { return IsE500; }
bool isFeatureMFTB() const { return FeatureMFTB; }
bool isDeprecatedDST() const { return DeprecatedDST; }
bool hasICBT() const { return HasICBT; }
|
[PowerPC] Loosen ELFv1 PPC64 func descriptor loads for indirect calls
Function pointers under PPC64 ELFv1 (which is used on PPC64/Linux on the
POWER7, A2 and earlier cores) are really pointers to a function descriptor, a
structure with three pointers: the actual pointer to the code to which to jump,
the pointer to the TOC needed by the callee, and an environment pointer. We
used to chain these loads, and make them opaque to the rest of the optimizer,
so that they'd always occur directly before the call. This is not necessary,
and in fact, highly suboptimal on embedded cores. Once the function pointer is
known, the loads can be performed ahead of time; in fact, they can be hoisted
out of loops.
Now these function descriptors are almost always generated by the linker, and
thus the contents of the descriptors are invariant. As a result, by default,
we'll mark the associated loads as invariant (allowing them to be hoisted out
of loops). I've added a target feature to turn this off, however, just in case
someone needs that option (constructing an on-stack descriptor, casting it to a
function pointer, and then calling it cannot be well-defined C/C++ code, but I
can imagine some JIT-compilation system doing so).
Consider this simple test:
$ cat call.c
typedef void (*fp)();
void bar(fp x) {
for (int i = 0; i < 1600000000; ++i)
x();
}
$ cat main.c
typedef void (*fp)();
void bar(fp x);
void foo() {}
int main() {
bar(foo);
}
On the PPC A2 (the BG/Q supercomputer), marking the function-descriptor loads
as invariant brings the execution time down to ~8 seconds from ~32 seconds with
the loads in the loop.
The difference on the POWER7 is smaller. Compiling with:
gcc -std=c99 -O3 -mcpu=native call.c main.c : ~6 seconds [this is 4.8.2]
clang -O3 -mcpu=native call.c main.c : ~5.3 seconds
clang -O3 -mcpu=native call.c main.c -mno-invariant-function-descriptors : ~4 seconds
(looks like we'd benefit from additional loop unrolling here, as a first
guess, because this is faster with the extra loads)
The -mno-invariant-function-descriptors will be added to Clang shortly.
llvm-svn: 226207
2015-01-16 05:17:34 +08:00
|
|
|
// True if ELFv1 function-descriptor loads may be treated as invariant
// (allowing them to be hoisted, e.g. out of loops).
bool hasInvariantFunctionDescriptors() const {
  return HasInvariantFunctionDescriptors;
}
// True if sub-word (8/16-bit) atomic operations are available.
bool hasPartwordAtomics() const { return HasPartwordAtomics; }
// True if direct GPR <-> VSR moves are available.
bool hasDirectMove() const { return HasDirectMove; }
|
2007-01-16 17:29:17 +08:00
|
|
|
|
[PowerPC] Add support for the QPX vector instruction set
This adds support for the QPX vector instruction set, which is used by the
enhanced A2 cores on the IBM BG/Q supercomputers. QPX vectors are 256 bytes
wide, holding 4 double-precision floating-point values. Boolean values, modeled
here as <4 x i1> are actually also represented as floating-point values
(essentially { -1, 1 } for { false, true }). QPX shares many features with
Altivec and VSX, but is distinct from both of them. One major difference is
that, instead of adding completely-separate vector registers, QPX vector
registers are extensions of the scalar floating-point registers (lane 0 is the
corresponding scalar floating-point value). The operations supported on QPX
vectors mirrors that supported on the scalar floating-point values (with some
additional ones for permutations and logical/comparison operations).
I've been maintaining this support out-of-tree, as part of the bgclang project,
for several years. This is not the entire bgclang patch set, but is most of the
subset that can be cleanly integrated into LLVM proper at this time. Adding
this to the LLVM backend is part of my efforts to rebase bgclang to the current
LLVM trunk, but is independently useful (especially for codes that use LLVM as
a JIT in library form).
The assembler/disassembler test coverage is complete. The CodeGen test coverage
is not, but I've included some tests, and more will be added as follow-up work.
llvm-svn: 230413
2015-02-25 09:06:45 +08:00
|
|
|
bool isQPXStackUnaligned() const { return IsQPXStackUnaligned; }
|
|
|
|
unsigned getPlatformStackAlignment() const {
|
|
|
|
if ((hasQPX() || isBGQ()) && !isQPXStackUnaligned())
|
|
|
|
return 32;
|
|
|
|
|
|
|
|
return 16;
|
|
|
|
}
|
2017-07-12 00:42:20 +08:00
|
|
|
|
|
|
|
// DarwinABI has a 224-byte red zone. PPC32 SVR4ABI(Non-DarwinABI) has no
|
|
|
|
// red zone and PPC64 SVR4ABI has a 288-byte red zone.
|
|
|
|
unsigned getRedZoneSize() const {
|
|
|
|
return isDarwinABI() ? 224 : (isPPC64() ? 288 : 0);
|
|
|
|
}
|
|
|
|
|
2015-03-26 03:36:23 +08:00
|
|
|
bool hasHTM() const { return HasHTM; }
|
2015-11-21 06:38:20 +08:00
|
|
|
bool hasFusion() const { return HasFusion; }
|
2015-12-15 20:19:34 +08:00
|
|
|
bool hasFloat128() const { return HasFloat128; }
|
2016-03-31 23:26:37 +08:00
|
|
|
bool isISA3_0() const { return IsISA3_0; }
|
2016-08-30 08:59:23 +08:00
|
|
|
bool useLongCalls() const { return UseLongCalls; }
|
2016-09-22 17:52:19 +08:00
|
|
|
bool needsSwapsForVSXMemOps() const {
|
|
|
|
return hasVSX() && isLittleEndian() && !hasP9Vector();
|
|
|
|
}
|
[PowerPC] Add support for the QPX vector instruction set
This adds support for the QPX vector instruction set, which is used by the
enhanced A2 cores on the IBM BG/Q supercomputers. QPX vectors are 256 bytes
wide, holding 4 double-precision floating-point values. Boolean values, modeled
here as <4 x i1> are actually also represented as floating-point values
(essentially { -1, 1 } for { false, true }). QPX shares many features with
Altivec and VSX, but is distinct from both of them. One major difference is
that, instead of adding completely-separate vector registers, QPX vector
registers are extensions of the scalar floating-point registers (lane 0 is the
corresponding scalar floating-point value). The operations supported on QPX
vectors mirror those supported on the scalar floating-point values (with some
additional ones for permutations and logical/comparison operations).
I've been maintaining this support out-of-tree, as part of the bgclang project,
for several years. This is not the entire bgclang patch set, but is most of the
subset that can be cleanly integrated into LLVM proper at this time. Adding
this to the LLVM backend is part of my efforts to rebase bgclang to the current
LLVM trunk, but is independently useful (especially for codes that use LLVM as
a JIT in library form).
The assembler/disassembler test coverage is complete. The CodeGen test coverage
is not, but I've included some tests, and more will be added as follow-up work.
llvm-svn: 230413
2015-02-25 09:06:45 +08:00
|
|
|
|
2016-03-29 09:36:01 +08:00
|
|
|
POPCNTDKind hasPOPCNTD() const { return HasPOPCNTD; }
|
|
|
|
|
2011-04-20 04:54:28 +08:00
|
|
|
const Triple &getTargetTriple() const { return TargetTriple; }
|
|
|
|
|
2008-01-03 03:35:16 +08:00
|
|
|
/// isDarwin - True if this is any darwin platform.
|
2011-04-20 08:14:25 +08:00
|
|
|
bool isDarwin() const { return TargetTriple.isMacOSX(); }
|
2013-01-29 08:22:47 +08:00
|
|
|
/// isBGQ - True if this is a BG/Q platform.
|
|
|
|
bool isBGQ() const { return TargetTriple.getVendor() == Triple::BGQ; }
|
2008-12-19 18:55:56 +08:00
|
|
|
|
2014-07-19 07:29:49 +08:00
|
|
|
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
|
|
|
|
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
|
2016-04-20 04:14:52 +08:00
|
|
|
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
|
2014-07-19 07:29:49 +08:00
|
|
|
|
2015-02-17 14:45:15 +08:00
|
|
|
bool isDarwinABI() const { return isTargetMachO() || isDarwin(); }
|
|
|
|
bool isSVR4ABI() const { return !isDarwinABI(); }
|
|
|
|
bool isELFv2ABI() const;
|
2009-08-15 19:54:46 +08:00
|
|
|
|
2017-01-17 04:12:26 +08:00
|
|
|
/// Originally, this function returned hasISEL(). Now we always enable it,
/// but may expand the ISEL instruction later.
bool enableEarlyIfConversion() const override { return true; }
|
2014-05-22 07:40:26 +08:00
|
|
|
|
2013-09-12 07:05:25 +08:00
|
|
|
// Scheduling customization.
// Whether to use the MachineScheduler pass; defined per-subtarget in the
// .cpp file.
bool enableMachineScheduler() const override;
|
2014-07-16 06:39:58 +08:00
|
|
|
// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
bool enablePostRAScheduler() const override;
|
2014-07-16 06:39:58 +08:00
|
|
|
AntiDepBreakMode getAntiDepBreakMode() const override;
|
|
|
|
void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
|
|
|
|
|
2013-09-12 07:05:25 +08:00
|
|
|
/// Adjust the MachineScheduler policy for this subtarget.
void overrideSchedPolicy(MachineSchedPolicy &Policy,
                         unsigned NumRegionInstrs) const override;
|
|
|
|
bool useAA() const override;
|
2015-01-09 10:03:11 +08:00
|
|
|
|
|
|
|
bool enableSubRegLiveness() const override;
|
2015-11-21 04:51:31 +08:00
|
|
|
|
|
|
|
/// classifyGlobalReference - Classify a global variable reference for the
/// current subtarget according to how we should reference it.
unsigned char classifyGlobalReference(const GlobalValue *GV) const;
|
[XRay] Implement powerpc64le xray.
Summary:
powerpc64 big-endian is not supported, but I believe that most logic can
be shared, except for xray_powerpc64.cc.
Also add a function InvalidateInstructionCache to xray_util.h, which is
copied from llvm/Support/Memory.cpp. I'm not sure if I need to add a unittest,
and I don't know how.
Reviewers: dberris, echristo, iteratee, kbarton, hfinkel
Subscribers: mehdi_amini, nemanjai, mgorny, llvm-commits
Differential Revision: https://reviews.llvm.org/D29742
llvm-svn: 294781
2017-02-11 05:03:24 +08:00
|
|
|
|
|
|
|
bool isXRaySupported() const override { return IsPPC64 && IsLittleEndian; }
|
2005-08-04 15:12:09 +08:00
|
|
|
};
|
2015-06-23 17:49:53 +08:00
|
|
|
} // End llvm namespace
|
2005-08-04 15:12:09 +08:00
|
|
|
|
|
|
|
#endif
|