llvm-project/llvm/lib/Target/X86/X86Subtarget.cpp

//===-- X86Subtarget.cpp - X86 Subtarget Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86CallLowering.h"
#include "X86LegalizerInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86Subtarget.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#if defined(_MSC_VER)
#include <intrin.h>
#endif
using namespace llvm;
#define DEBUG_TYPE "subtarget"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "X86GenSubtargetInfo.inc"
// Temporary option to control early if-conversion for x86 while adding machine
// models.
static cl::opt<bool>
X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
               cl::desc("Enable early if-conversion on X86"));
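// (Illustrative usage, not part of the original file: as a cl::opt this is an
// internal codegen flag, reachable as "llc -x86-early-ifcvt" or, from clang,
// "-mllvm -x86-early-ifcvt".)
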
/// Classify a blockaddress reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
unsigned char X86Subtarget::classifyBlockAddressReference() const {
  return classifyLocalReference(nullptr);
}

/// Classify a global variable reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
unsigned char
X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
  return classifyGlobalReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
  // 64 bits can use %rip addressing for anything local.
  if (is64Bit())
    return X86II::MO_NO_FLAG;

  // If this is for a position dependent executable, the static linker can
  // figure it out.
  if (!isPositionIndependent())
    return X86II::MO_NO_FLAG;

  // The COFF dynamic linker just patches the executable sections.
  if (isTargetCOFF())
    return X86II::MO_NO_FLAG;

  if (isTargetDarwin()) {
    // 32 bit macho has no relocation for a-b if a is undefined, even if
    // b is in the section that is being relocated.
    // This means we have to use a load even for GVs that are known to be
    // local to the dso.
    if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
      return X86II::MO_DARWIN_NONLAZY_PIC_BASE;

    return X86II::MO_PIC_BASE_OFFSET;
  }

  return X86II::MO_GOTOFF;
}
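
// Rough sketch of how these classifications surface in emitted code
// (illustrative, not exhaustive):
//   MO_GOTOFF (32-bit ELF PIC):         leal foo@GOTOFF(%ebx), %eax
//   MO_PIC_BASE_OFFSET (32-bit Darwin): address formed as an offset from the
//                                       function's materialized picbase label.
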
unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
                                                    const Module &M) const {
  // Large model never uses stubs.
  if (TM.getCodeModel() == CodeModel::Large)
    return X86II::MO_NO_FLAG;

  // Absolute symbols can be referenced directly.
  if (GV) {
    if (Optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
      // See if we can use the 8-bit immediate form. Note that some instructions
      // will sign extend the immediate operand, so to be conservative we only
      // accept the range [0,128).
      if (CR->getUnsignedMax().ult(128))
        return X86II::MO_ABS8;
      else
        return X86II::MO_NO_FLAG;
    }
  }

  if (TM.shouldAssumeDSOLocal(M, GV))
    return classifyLocalReference(GV);

  if (isTargetCOFF())
    return X86II::MO_DLLIMPORT;

  if (is64Bit())
    return X86II::MO_GOTPCREL;

  if (isTargetDarwin()) {
    if (!isPositionIndependent())
      return X86II::MO_DARWIN_NONLAZY;
    return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  }

  return X86II::MO_GOT;
}
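
// Illustrative x86-64 lowering of the common cases above:
//   MO_GOTPCREL: movq foo@GOTPCREL(%rip), %rax   (load the address via GOT)
//   MO_NO_FLAG:  direct %rip-relative access, e.g. leaq foo(%rip), %rax
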
unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
  return classifyGlobalFunctionReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
                                              const Module &M) const {
  if (TM.shouldAssumeDSOLocal(M, GV))
    return X86II::MO_NO_FLAG;

  if (isTargetCOFF()) {
    assert(GV->hasDLLImportStorageClass() &&
           "shouldAssumeDSOLocal gave inconsistent answer");
    return X86II::MO_DLLIMPORT;
  }

  const Function *F = dyn_cast_or_null<Function>(GV);

  if (isTargetELF()) {
    if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
      // According to psABI, PLT stub clobbers XMM8-XMM15.
      // In Regcall calling convention those registers are used for passing
      // parameters. Thus we need to prevent lazy binding in Regcall.
      return X86II::MO_GOTPCREL;
    // If PLT must be avoided then the call should be via GOTPCREL.
    if (((F && F->hasFnAttribute(Attribute::NonLazyBind)) ||
         (!F && M.getRtLibUseGOT())) &&
        is64Bit())
      return X86II::MO_GOTPCREL;
    return X86II::MO_PLT;
  }

  if (is64Bit()) {
    if (F && F->hasFnAttribute(Attribute::NonLazyBind))
      // If the function is marked as non-lazy, generate an indirect call
      // which loads from the GOT directly. This avoids runtime overhead
      // at the cost of eager binding (and one extra byte of encoding).
      return X86II::MO_GOTPCREL;
    return X86II::MO_NO_FLAG;
  }

  return X86II::MO_NO_FLAG;
}
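
// Illustrative contrast between the two call forms selected above (x86-64):
//   MO_PLT:      callq foo@PLT               (lazily bound through the PLT)
//   MO_GOTPCREL: callq *foo@GOTPCREL(%rip)   (eager, indirect through the GOT)
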
/// Return true if the subtarget allows calls to immediate address.
bool X86Subtarget::isLegalToCallImmediateAddr() const {
  // FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
  // but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
  // the following check for Win32 should be removed.
  if (In64BitMode || isTargetWin32())
    return false;
  return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
}

void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  std::string CPUName = CPU;
  if (CPUName.empty())
    CPUName = "generic";

  // Make sure 64-bit features are available in 64-bit mode. (But make sure
  // SSE2 can be turned off explicitly.)
  std::string FullFS = FS;
  if (In64BitMode) {
    if (!FullFS.empty())
      FullFS = "+64bit,+sse2," + FullFS;
    else
      FullFS = "+64bit,+sse2";
  }

  // LAHF/SAHF are always supported in non-64-bit mode.
  if (!In64BitMode) {
    if (!FullFS.empty())
      FullFS = "+sahf," + FullFS;
    else
      FullFS = "+sahf";
  }
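
  // For example, in 64-bit mode with FS == "+avx2,-sse2", FullFS becomes
  // "+64bit,+sse2,+avx2,-sse2"; since features are applied in order, the
  // user's explicit "-sse2" still wins.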

  // Parse features string and set the CPU.
  ParseSubtargetFeatures(CPUName, FullFS);

  // All CPUs that implement SSE4.2 or SSE4A support unaligned accesses of
  // 16-bytes and under that are reasonably fast. These features were
  // introduced with Intel's Nehalem/Silvermont and AMD's Family10h
  // micro-architectures respectively.
  if (hasSSE42() || hasSSE4A())
    IsUAMem16Slow = false;

  InstrItins = getInstrItineraryForCPU(CPUName);

  // It's important to keep the MCSubtargetInfo feature bits in sync with
  // the target data structure which is shared with the MC code emitter, etc.
  if (In64BitMode)
    ToggleFeature(X86::Mode64Bit);
  else if (In32BitMode)
    ToggleFeature(X86::Mode32Bit);
  else if (In16BitMode)
    ToggleFeature(X86::Mode16Bit);
  else
    llvm_unreachable("Not 16-bit, 32-bit or 64-bit mode!");

  DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
               << ", 3DNowLevel " << X863DNowLevel
               << ", 64bit " << HasX86_64 << "\n");
  assert((!In64BitMode || HasX86_64) &&
         "64-bit code requested on a subtarget that doesn't support it!");

  // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD and Solaris (both
  // 32 and 64 bit) and for all 64-bit targets.
  if (StackAlignOverride)
    stackAlignment = StackAlignOverride;
  else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
           isTargetKFreeBSD() || In64BitMode)
    stackAlignment = 16;

  // Some CPUs have more overhead for gather. The specified overhead is relative
  // to the Load operation. "2" is the number provided by Intel architects. This
  // parameter is used for cost estimation of Gather Op and comparison with
  // other alternatives.
  // TODO: Remove the explicit hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (hasAVX512() || (hasAVX2() && hasFastGather()))
    GatherOverhead = 2;
  if (hasAVX512())
    ScatterOverhead = 2;
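
  // (These overheads are consumed by the X86 cost model when weighing
  // gather/scatter against scalarized memory operations; see
  // X86TargetTransformInfo.)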

  // Consume the vector width attribute or apply any target specific limit.
  if (PreferVectorWidthOverride)
    PreferVectorWidth = PreferVectorWidthOverride;
  else if (Prefer256Bit)
    PreferVectorWidth = 256;
}

void X86Subtarget::initializeEnvironment() {
  X86SSELevel = NoSSE;
  X863DNowLevel = NoThreeDNow;
  HasX87 = false;
  HasNOPL = false;
  HasCMov = false;
  HasX86_64 = false;
  HasPOPCNT = false;
  HasSSE4A = false;
  HasAES = false;
  HasVAES = false;
  HasFXSR = false;
  HasXSAVE = false;
  HasXSAVEOPT = false;
  HasXSAVEC = false;
  HasXSAVES = false;
  HasPCLMUL = false;
  HasVPCLMULQDQ = false;
  HasGFNI = false;
  HasFMA = false;
  HasFMA4 = false;
  HasXOP = false;
  HasTBM = false;
  HasLWP = false;
  HasMOVBE = false;
  HasRDRAND = false;
  HasF16C = false;
  HasFSGSBase = false;
  HasLZCNT = false;
  HasBMI = false;
  HasBMI2 = false;
  HasVBMI = false;
  HasVBMI2 = false;
  HasIFMA = false;
  HasRTM = false;
  HasERI = false;
  HasCDI = false;
  HasPFI = false;
  HasDQI = false;
  HasVPOPCNTDQ = false;
  HasBWI = false;
  HasVLX = false;
  HasADX = false;
  HasPKU = false;
  HasVNNI = false;
  HasBITALG = false;
  HasSHA = false;
  HasPREFETCHWT1 = false;
  HasPRFCHW = false;
  HasRDSEED = false;
  HasLAHFSAHF = false;
  HasMWAITX = false;
  HasCLZERO = false;
  HasMPX = false;
  HasSHSTK = false;
  HasIBT = false;
  HasSGX = false;
  HasCLFLUSHOPT = false;
  HasCLWB = false;
  HasRDPID = false;
  UseRetpoline = false;
  UseRetpolineExternalThunk = false;
  IsPMULLDSlow = false;
  IsSHLDSlow = false;
  IsUAMem16Slow = false;
  IsUAMem32Slow = false;
  HasSSEUnalignedMem = false;
  HasCmpxchg16b = false;
  UseLeaForSP = false;
  HasPOPCNTFalseDeps = false;
  HasLZCNTFalseDeps = false;
  HasFastVariableShuffle = false;
  HasFastPartialYMMorZMMWrite = false;
  HasFast11ByteNOP = false;
  HasFast15ByteNOP = false;
  HasFastGather = false;
  HasFastScalarFSQRT = false;
  HasFastVectorFSQRT = false;
  HasFastLZCNT = false;
  HasFastSHLDRotate = false;
  HasMacroFusion = false;
  HasERMSB = false;
  HasSlowDivide32 = false;
  HasSlowDivide64 = false;
  PadShortFunctions = false;
  SlowTwoMemOps = false;
  LEAUsesAG = false;
  SlowLEA = false;
  Slow3OpsLEA = false;
  SlowIncDec = false;
  stackAlignment = 4;
  // FIXME: this is a known good value for Yonah. How about others?
  MaxInlineSizeThreshold = 128;
  UseSoftFloat = false;
  X86ProcFamily = Others;
  GatherOverhead = 1024;
  ScatterOverhead = 1024;
  PreferVectorWidth = UINT32_MAX;
  Prefer256Bit = false;
}

X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initializeEnvironment();
  initSubtargetFeatures(CPU, FS);
  return *this;
}

X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
                           const X86TargetMachine &TM,
                           unsigned StackAlignOverride,
                           unsigned PreferVectorWidthOverride,
                           unsigned RequiredVectorWidth)
    : X86GenSubtargetInfo(TT, CPU, FS), X86ProcFamily(Others),
      PICStyle(PICStyles::None), TM(TM), TargetTriple(TT),
      StackAlignOverride(StackAlignOverride),
      PreferVectorWidthOverride(PreferVectorWidthOverride),
      RequiredVectorWidth(RequiredVectorWidth),
      In64BitMode(TargetTriple.getArch() == Triple::x86_64),
      In32BitMode(TargetTriple.getArch() == Triple::x86 &&
                  TargetTriple.getEnvironment() != Triple::CODE16),
      In16BitMode(TargetTriple.getArch() == Triple::x86 &&
                  TargetTriple.getEnvironment() == Triple::CODE16),
      InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
      FrameLowering(*this, getStackAlignment()) {
  // Determine the PICStyle based on the target selected.
  if (!isPositionIndependent())
    setPICStyle(PICStyles::None);
  else if (is64Bit())
    setPICStyle(PICStyles::RIPRel);
  else if (isTargetCOFF())
    setPICStyle(PICStyles::None);
  else if (isTargetDarwin())
    setPICStyle(PICStyles::StubPIC);
  else if (isTargetELF())
    setPICStyle(PICStyles::GOT);

  CallLoweringInfo.reset(new X86CallLowering(*getTargetLowering()));
  Legalizer.reset(new X86LegalizerInfo(*this, TM));

  auto *RBI = new X86RegisterBankInfo(*getRegisterInfo());
  RegBankInfo.reset(RBI);
  InstSelector.reset(createX86InstructionSelector(TM, *this, *RBI));
}

const CallLowering *X86Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

const InstructionSelector *X86Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *X86Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *X86Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

bool X86Subtarget::enableEarlyIfConversion() const {
  return hasCMov() && X86EarlyIfConv;
}