llvm-project/llvm/lib/Target/PowerPC/PPCSubtarget.h

//===-- PPCSubtarget.h - Define Subtarget for the PPC ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the PowerPC specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_POWERPC_PPCSUBTARGET_H
#define LLVM_LIB_TARGET_POWERPC_PPCSUBTARGET_H
#include "PPCFrameLowering.h"
#include "PPCISelLowering.h"
#include "PPCInstrInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
#include <string>
#define GET_SUBTARGETINFO_HEADER
#include "PPCGenSubtargetInfo.inc"
// GCC #defines PPC on Linux but we use it as our namespace name
#undef PPC
namespace llvm {
class StringRef;
namespace PPC {
// -m directive values.
enum {
DIR_NONE,
DIR_32,
DIR_440,
DIR_601,
DIR_602,
DIR_603,
DIR_7400,
DIR_750,
DIR_970,
DIR_A2,
DIR_E500,
DIR_E500mc,
DIR_E5500,
DIR_PWR3,
DIR_PWR4,
DIR_PWR5,
DIR_PWR5X,
DIR_PWR6,
DIR_PWR6X,
DIR_PWR7,
DIR_PWR8,
DIR_PWR9,
DIR_PWR10,
DIR_PWR_FUTURE,
DIR_64
};
} // end namespace PPC
class GlobalValue;
class PPCSubtarget : public PPCGenSubtargetInfo {
public:
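/// Classification of the popcntd (Population Count Doubleword) instruction
/// on the selected CPU: unavailable, available but slow, or fast.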
enum POPCNTDKind {
POPCNTD_Unavailable,
POPCNTD_Slow,
POPCNTD_Fast
};
protected:
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
/// stackAlignment - The minimum alignment of the stack frame known to hold on
/// entry to the function, and which must be maintained by every function.
Align StackAlignment;
/// Selected instruction itineraries (one entry per itinerary class).
InstrItineraryData InstrItins;
/// Which cpu directive was used.
unsigned CPUDirective;
/// Used by the ISel to turn on optimizations for POWER4-derived architectures.
bool HasMFOCRF;
bool Has64BitSupport;
bool Use64BitRegs;
bool UseCRBits;
bool HasHardFloat;
bool IsPPC64;
bool HasAltivec;
bool HasFPU;
bool HasSPE;
bool HasEFPU2;
bool HasVSX;
bool NeedsTwoConstNR;
bool HasP8Vector;
bool HasP8Altivec;
bool HasP8Crypto;
bool HasP9Vector;
bool HasP9Altivec;
bool HasP10Vector;
bool HasPrefixInstrs;
bool HasPCRelativeMemops;
bool HasMMA;
bool HasROPProtect;
bool HasPrivileged;
bool HasFCPSGN;
bool HasFSQRT;
bool HasFRE, HasFRES, HasFRSQRTE, HasFRSQRTES;
bool HasRecipPrec;
bool HasSTFIWX;
bool HasLFIWAX;
bool HasFPRND;
bool HasFPCVT;
bool HasISEL;
bool HasBPERMD;
bool HasExtDiv;
bool HasCMPB;
bool HasLDBRX;
bool IsBookE;
bool HasOnlyMSYNC;
bool IsE500;
bool IsPPC4xx;
bool IsPPC6xx;
bool FeatureMFTB;
bool AllowsUnalignedFPAccess;
bool DeprecatedDST;
bool IsLittleEndian;
bool HasICBT;
bool HasInvariantFunctionDescriptors;
bool HasPartwordAtomics;
bool HasQuadwordAtomics;
bool HasDirectMove;
bool HasHTM;
bool HasFloat128;
bool HasFusion;
bool HasStoreFusion;
bool HasAddiLoadFusion;
bool HasAddisLoadFusion;
bool IsISA2_07;
bool IsISA3_0;
bool IsISA3_1;
bool UseLongCalls;
bool SecurePlt;
bool VectorsUseTwoUnits;
bool UsePPCPreRASchedStrategy;
bool UsePPCPostRASchedStrategy;
bool PairedVectorMemops;
bool PredictableSelectIsExpensive;
bool HasModernAIXAs;
bool IsAIX;
POPCNTDKind HasPOPCNTD;
const PPCTargetMachine &TM;
PPCFrameLowering FrameLowering;
PPCInstrInfo InstrInfo;
PPCTargetLowering TLInfo;
SelectionDAGTargetInfo TSInfo;
/// GlobalISel related APIs.
std::unique_ptr<CallLowering> CallLoweringInfo;
std::unique_ptr<LegalizerInfo> Legalizer;
std::unique_ptr<RegisterBankInfo> RegBankInfo;
std::unique_ptr<InstructionSelector> InstSelector;
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
PPCSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
const PPCTargetMachine &TM);
/// ParseSubtargetFeatures - Parses the features string, setting the specified
/// subtarget options. The definition of this function is auto-generated by
/// tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
/// getStackAlignment - Returns the minimum alignment of the stack frame known
/// to hold on entry to the function, and which must be maintained by every
/// function for this subtarget.
Align getStackAlignment() const { return StackAlignment; }
/// getCPUDirective - Returns the -m directive specified for the cpu.
///
unsigned getCPUDirective() const { return CPUDirective; }
/// getInstrItineraryData - Return the instruction itineraries based on
/// subtarget selection.
const InstrItineraryData *getInstrItineraryData() const override {
return &InstrItins;
}
const PPCFrameLowering *getFrameLowering() const override {
return &FrameLowering;
}
const PPCInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const PPCTargetLowering *getTargetLowering() const override {
return &TLInfo;
}
const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
const PPCRegisterInfo *getRegisterInfo() const override {
return &getInstrInfo()->getRegisterInfo();
}
const PPCTargetMachine &getTargetMachine() const { return TM; }
/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
PPCSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
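// Illustrative note (not part of this interface): this is typically invoked
// from the constructor's initializer list, e.g.
//   FrameLowering(initializeSubtargetDependencies(CPU, FS)),
// so the feature string is parsed before the dependent members are built.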
private:
void initializeEnvironment();
void initSubtargetFeatures(StringRef CPU, StringRef FS);
public:
/// isPPC64 - Return true if we are generating code for 64-bit pointer mode.
///
bool isPPC64() const;
/// has64BitSupport - Return true if the selected CPU supports 64-bit
/// instructions, regardless of whether we are in 32-bit or 64-bit mode.
bool has64BitSupport() const { return Has64BitSupport; }
// useSoftFloat - Return true if the soft-float option is turned on.
bool useSoftFloat() const {
if (isAIXABI() && !HasHardFloat)
report_fatal_error("soft-float is not yet supported on AIX.");
return !HasHardFloat;
}
/// use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit
/// registers in 32-bit mode when possible. This can only be true if
/// has64BitSupport() returns true.
bool use64BitRegs() const { return Use64BitRegs; }
/// useCRBits - Return true if we should store and manipulate i1 values in
/// the individual condition register bits.
bool useCRBits() const { return UseCRBits; }
// isLittleEndian - True if generating little-endian code.
bool isLittleEndian() const { return IsLittleEndian; }
// Specific obvious features.
bool hasFCPSGN() const { return HasFCPSGN; }
bool hasFSQRT() const { return HasFSQRT; }
bool hasFRE() const { return HasFRE; }
bool hasFRES() const { return HasFRES; }
bool hasFRSQRTE() const { return HasFRSQRTE; }
bool hasFRSQRTES() const { return HasFRSQRTES; }
bool hasRecipPrec() const { return HasRecipPrec; }
bool hasSTFIWX() const { return HasSTFIWX; }
bool hasLFIWAX() const { return HasLFIWAX; }
bool hasFPRND() const { return HasFPRND; }
bool hasFPCVT() const { return HasFPCVT; }
bool hasAltivec() const { return HasAltivec; }
bool hasSPE() const { return HasSPE; }
bool hasEFPU2() const { return HasEFPU2; }
bool hasFPU() const { return HasFPU; }
bool hasVSX() const { return HasVSX; }
bool needsTwoConstNR() const { return NeedsTwoConstNR; }
bool hasP8Vector() const { return HasP8Vector; }
bool hasP8Altivec() const { return HasP8Altivec; }
bool hasP8Crypto() const { return HasP8Crypto; }
bool hasP9Vector() const { return HasP9Vector; }
bool hasP9Altivec() const { return HasP9Altivec; }
bool hasP10Vector() const { return HasP10Vector; }
bool hasPrefixInstrs() const { return HasPrefixInstrs; }
bool hasPCRelativeMemops() const { return HasPCRelativeMemops; }
bool hasMMA() const { return HasMMA; }
bool hasROPProtect() const { return HasROPProtect; }
bool hasPrivileged() const { return HasPrivileged; }
bool pairedVectorMemops() const { return PairedVectorMemops; }
bool hasMFOCRF() const { return HasMFOCRF; }
bool hasISEL() const { return HasISEL; }
bool hasBPERMD() const { return HasBPERMD; }
bool hasExtDiv() const { return HasExtDiv; }
bool hasCMPB() const { return HasCMPB; }
bool hasLDBRX() const { return HasLDBRX; }
bool isBookE() const { return IsBookE; }
bool hasOnlyMSYNC() const { return HasOnlyMSYNC; }
bool isPPC4xx() const { return IsPPC4xx; }
bool isPPC6xx() const { return IsPPC6xx; }
bool isSecurePlt() const { return SecurePlt; }
bool vectorsUseTwoUnits() const { return VectorsUseTwoUnits; }
bool isE500() const { return IsE500; }
bool isFeatureMFTB() const { return FeatureMFTB; }
bool allowsUnalignedFPAccess() const { return AllowsUnalignedFPAccess; }
bool isDeprecatedDST() const { return DeprecatedDST; }
bool hasICBT() const { return HasICBT; }
bool hasInvariantFunctionDescriptors() const {
return HasInvariantFunctionDescriptors;
}
bool usePPCPreRASchedStrategy() const { return UsePPCPreRASchedStrategy; }
bool usePPCPostRASchedStrategy() const { return UsePPCPostRASchedStrategy; }
bool hasPartwordAtomics() const { return HasPartwordAtomics; }
bool hasQuadwordAtomics() const { return HasQuadwordAtomics; }
bool hasDirectMove() const { return HasDirectMove; }
Align getPlatformStackAlignment() const {
return Align(16);
}
unsigned getRedZoneSize() const {
if (isPPC64())
// 288 bytes = 18*8 (FPRs) + 18*8 (GPRs, GPR13 reserved)
return 288;
// AIX PPC32: 220 bytes = 18*8 (FPRs) + 19*4 (GPRs);
// PPC32 SVR4ABI has no redzone.
return isAIXABI() ? 220 : 0;
}
bool hasHTM() const { return HasHTM; }
bool hasFloat128() const { return HasFloat128; }
bool isISA2_07() const { return IsISA2_07; }
bool isISA3_0() const { return IsISA3_0; }
bool isISA3_1() const { return IsISA3_1; }
bool useLongCalls() const { return UseLongCalls; }
bool hasFusion() const { return HasFusion; }
bool hasStoreFusion() const { return HasStoreFusion; }
bool hasAddiLoadFusion() const { return HasAddiLoadFusion; }
bool hasAddisLoadFusion() const { return HasAddisLoadFusion; }
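// On little-endian subtargets before Power9, VSX loads and stores
// (lxvd2x/stxvd2x) access doubleword elements in big-endian order, so the
// backend must insert xxswapd swaps around them; ISA 3.0 adds true
// little-endian vector memory instructions that make the swaps unnecessary.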
bool needsSwapsForVSXMemOps() const {
return hasVSX() && isLittleEndian() && !hasP9Vector();
}
POPCNTDKind hasPOPCNTD() const { return HasPOPCNTD; }
const Triple &getTargetTriple() const { return TargetTriple; }
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isAIXABI() const { return TargetTriple.isOSAIX(); }
bool isSVR4ABI() const { return !isAIXABI(); }
bool isELFv2ABI() const;
bool is64BitELFABI() const { return isSVR4ABI() && isPPC64(); }
bool is32BitELFABI() const { return isSVR4ABI() && !isPPC64(); }
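/// True if calls are lowered with ISA 3.1 PC-relative addressing instead of
/// TOC-based addressing.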
bool isUsingPCRelativeCalls() const;
/// Originally, this function returned hasISEL(). Now we always enable it, but
/// may expand the ISEL instruction later.
bool enableEarlyIfConversion() const override { return true; }
/// Scheduling customization.
bool enableMachineScheduler() const override;
/// Pipeliner customization.
bool enableMachinePipeliner() const override;
/// Machine Pipeliner customization.
bool useDFAforSMS() const override;
/// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
bool enablePostRAScheduler() const override;
AntiDepBreakMode getAntiDepBreakMode() const override;
void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
void overrideSchedPolicy(MachineSchedPolicy &Policy,
unsigned NumRegionInstrs) const override;
bool useAA() const override;
bool enableSubRegLiveness() const override;
/// True if the GV will be accessed via an indirect symbol.
bool isGVIndirectSymbol(const GlobalValue *GV) const;
/// True if the ABI is descriptor based.
bool usesFunctionDescriptors() const {
// Both 32-bit and 64-bit AIX are descriptor based. For ELF, only the 64-bit
// ELFv1 ABI uses descriptors.
return isAIXABI() || (is64BitELFABI() && !isELFv2ABI());
}
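// For illustration only (not part of this interface): under a
// descriptor-based ABI, a function pointer addresses a structure of the form
//   struct FunctionDescriptor {
//     void *EntryPoint;          // offset 0
//     void *TOCAnchor;           // offset 4 (PPC32) / 8 (PPC64)
//     void *EnvironmentPointer;  // offset 8 (PPC32) / 16 (PPC64)
//   };
// The accessors below return the offsets of the latter two fields.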
unsigned descriptorTOCAnchorOffset() const {
assert(usesFunctionDescriptors() &&
"Should only be called when the target uses descriptors.");
return IsPPC64 ? 8 : 4;
}
unsigned descriptorEnvironmentPointerOffset() const {
assert(usesFunctionDescriptors() &&
"Should only be called when the target uses descriptors.");
return IsPPC64 ? 16 : 8;
}
MCRegister getEnvironmentPointerRegister() const {
assert(usesFunctionDescriptors() &&
"Should only be called when the target uses descriptors.");
return IsPPC64 ? PPC::X11 : PPC::R11;
}
MCRegister getTOCPointerRegister() const {
assert((is64BitELFABI() || isAIXABI()) &&
"Should only be called when the target is a TOC based ABI.");
return IsPPC64 ? PPC::X2 : PPC::R2;
}
MCRegister getStackPointerRegister() const {
return IsPPC64 ? PPC::X1 : PPC::R1;
}
bool isXRaySupported() const override { return IsPPC64 && IsLittleEndian; }
bool isPredictableSelectIsExpensive() const {
return PredictableSelectIsExpensive;
}
// Selects the allocation order of GPRC and G8RC. It must be kept strictly
// consistent with the corresponding AltOrders in PPCRegisterInfo.td.
unsigned getGPRAllocationOrderIdx() const {
if (is64BitELFABI())
return 1;
if (isAIXABI())
return 2;
return 0;
}
// GlobalISel
const CallLowering *getCallLowering() const override;
const RegisterBankInfo *getRegBankInfo() const override;
const LegalizerInfo *getLegalizerInfo() const override;
InstructionSelector *getInstructionSelector() const override;
};
} // End llvm namespace
#endif