//===-- X86.td - Target definition file for the Intel X86 --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a target description file for the Intel i386 architecture, referred
// to here as the "X86" architecture.
//
//===----------------------------------------------------------------------===//

// Get the target-independent interfaces which we are implementing...
//
include "llvm/Target/Target.td"

//===----------------------------------------------------------------------===//
// X86 Subtarget state
//

def Mode64Bit : SubtargetFeature<"64bit-mode", "In64BitMode", "true",
                                 "64-bit mode (x86_64)">;
def Mode32Bit : SubtargetFeature<"32bit-mode", "In32BitMode", "true",
                                 "32-bit mode (80386)">;
def Mode16Bit : SubtargetFeature<"16bit-mode", "In16BitMode", "true",
                                 "16-bit mode (i8086)">;

//===----------------------------------------------------------------------===//
// X86 Subtarget features
//===----------------------------------------------------------------------===//

def FeatureX87 : SubtargetFeature<"x87","HasX87", "true",
                                  "Enable X87 float instructions">;

def FeatureNOPL : SubtargetFeature<"nopl", "HasNOPL", "true",
                                   "Enable NOPL instruction">;

def FeatureCMOV : SubtargetFeature<"cmov","HasCMov", "true",
                                   "Enable conditional move instructions">;

def FeatureCMPXCHG8B : SubtargetFeature<"cx8", "HasCmpxchg8b", "true",
                                        "Support CMPXCHG8B instructions">;

def FeaturePOPCNT : SubtargetFeature<"popcnt", "HasPOPCNT", "true",
                                     "Support POPCNT instruction">;

def FeatureFXSR : SubtargetFeature<"fxsr", "HasFXSR", "true",
                                   "Support fxsave/fxrstor instructions">;

def FeatureXSAVE : SubtargetFeature<"xsave", "HasXSAVE", "true",
                                    "Support xsave instructions">;

def FeatureXSAVEOPT: SubtargetFeature<"xsaveopt", "HasXSAVEOPT", "true",
                                      "Support xsaveopt instructions",
                                      [FeatureXSAVE]>;

def FeatureXSAVEC : SubtargetFeature<"xsavec", "HasXSAVEC", "true",
                                     "Support xsavec instructions",
                                     [FeatureXSAVE]>;

def FeatureXSAVES : SubtargetFeature<"xsaves", "HasXSAVES", "true",
                                     "Support xsaves instructions",
                                     [FeatureXSAVE]>;

def FeatureSSE1 : SubtargetFeature<"sse", "X86SSELevel", "SSE1",
                                   "Enable SSE instructions">;
def FeatureSSE2 : SubtargetFeature<"sse2", "X86SSELevel", "SSE2",
                                   "Enable SSE2 instructions",
                                   [FeatureSSE1]>;
def FeatureSSE3 : SubtargetFeature<"sse3", "X86SSELevel", "SSE3",
                                   "Enable SSE3 instructions",
                                   [FeatureSSE2]>;
def FeatureSSSE3 : SubtargetFeature<"ssse3", "X86SSELevel", "SSSE3",
                                    "Enable SSSE3 instructions",
                                    [FeatureSSE3]>;
def FeatureSSE41 : SubtargetFeature<"sse4.1", "X86SSELevel", "SSE41",
                                    "Enable SSE 4.1 instructions",
                                    [FeatureSSSE3]>;
def FeatureSSE42 : SubtargetFeature<"sse4.2", "X86SSELevel", "SSE42",
                                    "Enable SSE 4.2 instructions",
                                    [FeatureSSE41]>;

// The MMX subtarget feature is separate from the rest of the SSE features
// because it's important (for odd compatibility reasons) to be able to
// turn it off explicitly while allowing SSE+ to be on.
def FeatureMMX : SubtargetFeature<"mmx","X863DNowLevel", "MMX",
                                  "Enable MMX instructions">;
def Feature3DNow : SubtargetFeature<"3dnow", "X863DNowLevel", "ThreeDNow",
                                    "Enable 3DNow! instructions",
                                    [FeatureMMX]>;
def Feature3DNowA : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
                                     "Enable 3DNow! Athlon instructions",
                                     [Feature3DNow]>;

// All x86-64 hardware has SSE2, but we don't mark SSE2 as an implied
// feature, because SSE2 can be disabled (e.g. for compiling OS kernels)
// without disabling 64-bit mode. Nothing should imply this feature bit. It
// is used to enforce that only 64-bit capable CPUs are used in 64-bit mode.
def Feature64Bit : SubtargetFeature<"64bit", "HasX86_64", "true",
                                    "Support 64-bit instructions">;
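// For example, a kernel build can stay in 64-bit mode with SSE2 turned off
// (an illustrative invocation, not part of this file):
//   llc -mtriple=x86_64-- -mattr=-sse2 kernel.ll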
def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
                                         "64-bit with cmpxchg16b",
                                         [FeatureCMPXCHG8B]>;

// SHLD/SHRD are microcoded (VectorPath) instructions with poor latency on
// several AMD processor families (K7, K8, K10, K12, K15, K16). Optimization
// guides for those CPUs recommend replacing them, when optimizing for speed,
// with sequences of the DirectPath instructions ADD/ADC/SHR/SHL/OR/LEA.
def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
                                       "SHLD instruction is slow">;
def FeatureSlowPMULLD : SubtargetFeature<"slow-pmulld", "IsPMULLDSlow", "true",
                                         "PMULLD instruction is slow">;
def FeatureSlowPMADDWD : SubtargetFeature<"slow-pmaddwd", "IsPMADDWDSlow",
                                          "true",
                                          "PMADDWD is slower than PMULLD">;

// FIXME: This should not apply to CPUs that do not have SSE.
def FeatureSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16",
                                          "IsUAMem16Slow", "true",
                                          "Slow unaligned 16-byte memory access">;
def FeatureSlowUAMem32 : SubtargetFeature<"slow-unaligned-mem-32",
                                          "IsUAMem32Slow", "true",
                                          "Slow unaligned 32-byte memory access">;

def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
                                    "Support SSE 4a instructions",
                                    [FeatureSSE3]>;

def FeatureAVX : SubtargetFeature<"avx", "X86SSELevel", "AVX",
                                  "Enable AVX instructions",
                                  [FeatureSSE42]>;
def FeatureAVX2 : SubtargetFeature<"avx2", "X86SSELevel", "AVX2",
                                   "Enable AVX2 instructions",
                                   [FeatureAVX]>;
def FeatureFMA : SubtargetFeature<"fma", "HasFMA", "true",
                                  "Enable three-operand fused multiply-add",
                                  [FeatureAVX]>;
def FeatureF16C : SubtargetFeature<"f16c", "HasF16C", "true",
                                   "Support 16-bit floating point conversion instructions",
                                   [FeatureAVX]>;
def FeatureAVX512 : SubtargetFeature<"avx512f", "X86SSELevel", "AVX512F",
                                     "Enable AVX-512 instructions",
                                     [FeatureAVX2, FeatureFMA, FeatureF16C]>;
def FeatureERI : SubtargetFeature<"avx512er", "HasERI", "true",
                                  "Enable AVX-512 Exponential and Reciprocal Instructions",
                                  [FeatureAVX512]>;
def FeatureCDI : SubtargetFeature<"avx512cd", "HasCDI", "true",
                                  "Enable AVX-512 Conflict Detection Instructions",
                                  [FeatureAVX512]>;
def FeatureVPOPCNTDQ : SubtargetFeature<"avx512vpopcntdq", "HasVPOPCNTDQ",
                                        "true", "Enable AVX-512 Population Count Instructions",
                                        [FeatureAVX512]>;
def FeaturePFI : SubtargetFeature<"avx512pf", "HasPFI", "true",
                                  "Enable AVX-512 PreFetch Instructions",
                                  [FeatureAVX512]>;
def FeaturePREFETCHWT1 : SubtargetFeature<"prefetchwt1", "HasPREFETCHWT1",
                                          "true",
                                          "Prefetch with Intent to Write and T1 Hint">;
def FeatureDQI : SubtargetFeature<"avx512dq", "HasDQI", "true",
                                  "Enable AVX-512 Doubleword and Quadword Instructions",
                                  [FeatureAVX512]>;
def FeatureBWI : SubtargetFeature<"avx512bw", "HasBWI", "true",
                                  "Enable AVX-512 Byte and Word Instructions",
                                  [FeatureAVX512]>;
def FeatureVLX : SubtargetFeature<"avx512vl", "HasVLX", "true",
                                  "Enable AVX-512 Vector Length eXtensions",
                                  [FeatureAVX512]>;
def FeatureVBMI : SubtargetFeature<"avx512vbmi", "HasVBMI", "true",
                                   "Enable AVX-512 Vector Byte Manipulation Instructions",
                                   [FeatureBWI]>;
def FeatureVBMI2 : SubtargetFeature<"avx512vbmi2", "HasVBMI2", "true",
                                    "Enable AVX-512 further Vector Byte Manipulation Instructions",
                                    [FeatureBWI]>;
def FeatureIFMA : SubtargetFeature<"avx512ifma", "HasIFMA", "true",
                                   "Enable AVX-512 Integer Fused Multiply-Add",
                                   [FeatureAVX512]>;
def FeaturePKU : SubtargetFeature<"pku", "HasPKU", "true",
                                  "Enable protection keys">;
def FeatureVNNI : SubtargetFeature<"avx512vnni", "HasVNNI", "true",
                                   "Enable AVX-512 Vector Neural Network Instructions",
                                   [FeatureAVX512]>;
// AVX512_BF16 (Cooper Lake and later): bfloat16 conversion instructions
// (VCVTNE2PS2BF16, VCVTNEPS2BF16) and the VDPBF16PS dot product, which
// accumulates BF16 pairs into packed single precision.
def FeatureBF16 : SubtargetFeature<"avx512bf16", "HasBF16", "true",
                                   "Support bfloat16 floating point",
                                   [FeatureBWI]>;
def FeatureBITALG : SubtargetFeature<"avx512bitalg", "HasBITALG", "true",
                                     "Enable AVX-512 Bit Algorithms",
                                     [FeatureBWI]>;
def FeatureVP2INTERSECT : SubtargetFeature<"avx512vp2intersect",
                                           "HasVP2INTERSECT", "true",
                                           "Enable AVX-512 vp2intersect",
                                           [FeatureAVX512]>;
def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
                                     "Enable packed carry-less multiplication instructions",
                                     [FeatureSSE2]>;
def FeatureGFNI : SubtargetFeature<"gfni", "HasGFNI", "true",
                                   "Enable Galois Field Arithmetic Instructions",
                                   [FeatureSSE2]>;
def FeatureVPCLMULQDQ : SubtargetFeature<"vpclmulqdq", "HasVPCLMULQDQ", "true",
                                         "Enable vpclmulqdq instructions",
                                         [FeatureAVX, FeaturePCLMUL]>;
def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true",
                                   "Enable four-operand fused multiply-add",
                                   [FeatureAVX, FeatureSSE4A]>;
def FeatureXOP : SubtargetFeature<"xop", "HasXOP", "true",
                                  "Enable XOP instructions",
                                  [FeatureFMA4]>;
def FeatureSSEUnalignedMem : SubtargetFeature<"sse-unaligned-mem",
                                              "HasSSEUnalignedMem", "true",
                                              "Allow unaligned memory operands with SSE instructions">;
def FeatureAES : SubtargetFeature<"aes", "HasAES", "true",
                                  "Enable AES instructions",
                                  [FeatureSSE2]>;
def FeatureVAES : SubtargetFeature<"vaes", "HasVAES", "true",
                                   "Promote selected AES instructions to AVX512/AVX registers",
                                   [FeatureAVX, FeatureAES]>;
def FeatureTBM : SubtargetFeature<"tbm", "HasTBM", "true",
                                  "Enable TBM instructions">;
def FeatureLWP : SubtargetFeature<"lwp", "HasLWP", "true",
                                  "Enable LWP instructions">;
def FeatureMOVBE : SubtargetFeature<"movbe", "HasMOVBE", "true",
                                    "Support MOVBE instruction">;
def FeatureRDRAND : SubtargetFeature<"rdrnd", "HasRDRAND", "true",
                                     "Support RDRAND instruction">;
def FeatureFSGSBase : SubtargetFeature<"fsgsbase", "HasFSGSBase", "true",
                                       "Support FS/GS Base instructions">;
def FeatureLZCNT : SubtargetFeature<"lzcnt", "HasLZCNT", "true",
                                    "Support LZCNT instruction">;
def FeatureBMI : SubtargetFeature<"bmi", "HasBMI", "true",
                                  "Support BMI instructions">;
def FeatureBMI2 : SubtargetFeature<"bmi2", "HasBMI2", "true",
                                   "Support BMI2 instructions">;
def FeatureRTM : SubtargetFeature<"rtm", "HasRTM", "true",
                                  "Support RTM instructions">;
def FeatureADX : SubtargetFeature<"adx", "HasADX", "true",
                                  "Support ADX instructions">;
def FeatureSHA : SubtargetFeature<"sha", "HasSHA", "true",
                                  "Enable SHA instructions",
                                  [FeatureSSE2]>;
def FeatureSHSTK : SubtargetFeature<"shstk", "HasSHSTK", "true",
                                    "Support CET Shadow-Stack instructions">;
def FeaturePRFCHW : SubtargetFeature<"prfchw", "HasPRFCHW", "true",
                                     "Support PRFCHW instructions">;
def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",
                                     "Support RDSEED instruction">;
def FeatureLAHFSAHF : SubtargetFeature<"sahf", "HasLAHFSAHF64", "true",
                                       "Support LAHF and SAHF instructions in 64-bit mode">;
def FeatureMWAITX : SubtargetFeature<"mwaitx", "HasMWAITX", "true",
                                     "Enable MONITORX/MWAITX timer functionality">;
def FeatureCLZERO : SubtargetFeature<"clzero", "HasCLZERO", "true",
                                     "Enable Cache Line Zero">;
def FeatureCLDEMOTE : SubtargetFeature<"cldemote", "HasCLDEMOTE", "true",
                                       "Enable Cache Demote">;
def FeaturePTWRITE : SubtargetFeature<"ptwrite", "HasPTWRITE", "true",
                                      "Support ptwrite instruction">;
def FeatureAMXTILE : SubtargetFeature<"amx-tile", "HasAMXTILE", "true",
                                      "Support AMX-TILE instructions">;
def FeatureAMXINT8 : SubtargetFeature<"amx-int8", "HasAMXINT8", "true",
                                      "Support AMX-INT8 instructions",
                                      [FeatureAMXTILE]>;
def FeatureAMXBF16 : SubtargetFeature<"amx-bf16", "HasAMXBF16", "true",
                                      "Support AMX-BF16 instructions",
                                      [FeatureAMXTILE]>;

def FeatureLEAForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
                                       "Use LEA for adjusting the stack pointer">;
def FeatureSlowDivide32 : SubtargetFeature<"idivl-to-divb",
                                           "HasSlowDivide32", "true",
                                           "Use 8-bit divide for positive values less than 256">;
def FeatureSlowDivide64 : SubtargetFeature<"idivq-to-divl",
                                           "HasSlowDivide64", "true",
                                           "Use 32-bit divide for positive values less than 2^32">;
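// Illustrative fast path for the "idivq-to-divl" bypass (a sketch, not the
// exact code the backend emits): if neither operand has high bits set, a
// 32-bit divide is used instead of the much slower 64-bit one.
//   mov %rdi, %rax
//   or  %rsi, %rax
//   shr $32, %rax
//   jnz .Lslow_divq        # fall back to the full 64-bit divide
//   mov %edi, %eax
//   xor %edx, %edx
//   divl %esi              # fast 32-bit divide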
def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions",
                                                "PadShortFunctions", "true",
                                                "Pad short functions">;
def FeatureINVPCID : SubtargetFeature<"invpcid", "HasINVPCID", "true",
                                      "Invalidate Process-Context Identifier">;
def FeatureSGX : SubtargetFeature<"sgx", "HasSGX", "true",
                                  "Enable Software Guard Extensions">;
def FeatureCLFLUSHOPT : SubtargetFeature<"clflushopt", "HasCLFLUSHOPT", "true",
                                         "Flush A Cache Line Optimized">;
def FeatureCLWB : SubtargetFeature<"clwb", "HasCLWB", "true",
                                   "Cache Line Write Back">;
def FeatureWBNOINVD : SubtargetFeature<"wbnoinvd", "HasWBNOINVD", "true",
                                       "Write Back No Invalidate">;
def FeatureRDPID : SubtargetFeature<"rdpid", "HasRDPID", "true",
                                    "Support RDPID instructions">;
def FeatureWAITPKG : SubtargetFeature<"waitpkg", "HasWAITPKG", "true",
                                      "Wait and pause enhancements">;
def FeatureENQCMD : SubtargetFeature<"enqcmd", "HasENQCMD", "true",
                                     "Has ENQCMD instructions">;
def FeatureKL : SubtargetFeature<"kl", "HasKL", "true",
                                 "Support Key Locker kl Instructions",
                                 [FeatureSSE2]>;
def FeatureWIDEKL : SubtargetFeature<"widekl", "HasWIDEKL", "true",
                                     "Support Key Locker wide Instructions",
                                     [FeatureKL]>;
def FeatureHRESET : SubtargetFeature<"hreset", "HasHRESET", "true",
                                     "Has hreset instruction">;
def FeatureSERIALIZE : SubtargetFeature<"serialize", "HasSERIALIZE", "true",
                                        "Has serialize instruction">;
def FeatureTSXLDTRK : SubtargetFeature<"tsxldtrk", "HasTSXLDTRK", "true",
                                       "Support TSXLDTRK instructions">;

// On some processors, instructions that implicitly take two memory operands are
// slow. In practice, this means that CALL, PUSH, and POP with memory operands
// should be avoided in favor of a MOV + register CALL/PUSH/POP.
def FeatureSlowTwoMemOps : SubtargetFeature<"slow-two-mem-ops",
                                            "SlowTwoMemOps", "true",
                                            "Two memory operand instructions are slow">;
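// For example (an illustrative sketch of the preferred lowering):
//   callq *(%rax)          # slow: load + indirect call in one instruction
// becomes
//   movq (%rax), %r11
//   callq *%r11            # load and call issued as separate operations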
def FeatureLEAUsesAG : SubtargetFeature<"lea-uses-ag", "LEAUsesAG", "true",
                                        "LEA instruction needs inputs at AG stage">;
def FeatureSlowLEA : SubtargetFeature<"slow-lea", "SlowLEA", "true",
                                      "LEA instruction with certain arguments is slow">;
def FeatureSlow3OpsLEA : SubtargetFeature<"slow-3ops-lea", "Slow3OpsLEA", "true",
                                          "LEA instruction with 3 ops or certain registers is slow">;
def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true",
                                         "INC and DEC instructions are slower than ADD and SUB">;
def FeatureSoftFloat
    : SubtargetFeature<"soft-float", "UseSoftFloat", "true",
                       "Use software floating point features">;
def FeaturePOPCNTFalseDeps : SubtargetFeature<"false-deps-popcnt",
                                              "HasPOPCNTFalseDeps", "true",
                                              "POPCNT has a false dependency on dest register">;
def FeatureLZCNTFalseDeps : SubtargetFeature<"false-deps-lzcnt-tzcnt",
                                             "HasLZCNTFalseDeps", "true",
                                             "LZCNT/TZCNT have a false dependency on dest register">;
def FeaturePCONFIG : SubtargetFeature<"pconfig", "HasPCONFIG", "true",
                                      "platform configuration instruction">;

// On recent X86 (port bound) processors, it is preferable to combine to a single
// shuffle using a variable mask over multiple fixed shuffles.
def FeatureFastVariableShuffle
    : SubtargetFeature<"fast-variable-shuffle",
                       "HasFastVariableShuffle",
                       "true", "Shuffles with variable masks are fast">;
// On some X86 processors, a vzeroupper instruction should be inserted after
// using ymm/zmm registers before executing code that may use SSE instructions.
def FeatureInsertVZEROUPPER
    : SubtargetFeature<"vzeroupper",
                       "InsertVZEROUPPER",
                       "true", "Should insert vzeroupper instructions">;
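// Illustrative placement (a sketch, not generated verbatim by the backend):
//   vaddps %ymm1, %ymm0, %ymm0   # AVX code using the upper ymm halves
//   vzeroupper                   # clear upper state before legacy-SSE code
//   addps  %xmm3, %xmm2          # legacy-SSE code that follows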
// FeatureFastScalarFSQRT should be enabled if scalar FSQRT has shorter latency
// than the corresponding NR code. FeatureFastVectorFSQRT should be enabled if
// vector FSQRT has higher throughput than the corresponding NR code.
// The idea is that throughput bound code is likely to be vectorized, so for
// vectorized code we should care about the throughput of SQRT operations.
// But if the code is scalar that probably means that the code has some kind of
// dependency and we should care more about reducing the latency.
def FeatureFastScalarFSQRT
    : SubtargetFeature<"fast-scalar-fsqrt", "HasFastScalarFSQRT",
                       "true", "Scalar SQRT is fast (disable Newton-Raphson)">;
def FeatureFastVectorFSQRT
    : SubtargetFeature<"fast-vector-fsqrt", "HasFastVectorFSQRT",
                       "true", "Vector SQRT is fast (disable Newton-Raphson)">;
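// The NR code referred to above approximates sqrt(a) as a * rsqrt(a) refined
// by one Newton-Raphson step (a sketch of the standard estimate sequence):
//   x0 = rsqrtss(a)                      # ~12-bit estimate
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)  # one NR iteration
//   sqrt(a) ~= a * x1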
// If lzcnt has equivalent latency/throughput to most simple integer ops, it can
// be used to replace test/set sequences.
def FeatureFastLZCNT
    : SubtargetFeature<
          "fast-lzcnt", "HasFastLZCNT", "true",
          "LZCNT instructions are as fast as most simple integer ops">;
// If the target can efficiently decode NOPs up to 7 bytes in length.
def FeatureFast7ByteNOP
    : SubtargetFeature<
          "fast-7bytenop", "HasFast7ByteNOP", "true",
          "Target can quickly decode up to 7 byte NOPs">;
// If the target can efficiently decode NOPs up to 11 bytes in length.
def FeatureFast11ByteNOP
    : SubtargetFeature<
          "fast-11bytenop", "HasFast11ByteNOP", "true",
          "Target can quickly decode up to 11 byte NOPs">;
// If the target can efficiently decode NOPs up to 15 bytes in length.
def FeatureFast15ByteNOP
    : SubtargetFeature<
          "fast-15bytenop", "HasFast15ByteNOP", "true",
          "Target can quickly decode up to 15 byte NOPs">;
// Sandy Bridge and newer processors can use SHLD with the same source on both
// inputs to implement rotate to avoid the partial flag update of the normal
// rotate instructions.
def FeatureFastSHLDRotate
    : SubtargetFeature<
          "fast-shld-rotate", "HasFastSHLDRotate", "true",
          "SHLD can be used as a faster rotate">;
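// For example, a rotate-left by 5 can be emitted as (illustrative):
//   shldl $5, %eax, %eax   # same register on both inputs == rotl by 5
// instead of
//   roll $5, %eax          # partially updates flags on some CPUs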
// Ivy Bridge and newer processors have enhanced REP MOVSB and STOSB (aka
// "string operations"). See "REP String Enhancement" in the Intel Software
// Development Manual. This feature essentially means that REP MOVSB will copy
// using the largest available size instead of copying bytes one by one, making
// it at least as fast as REPMOVS{W,D,Q}.
def FeatureERMSB
    : SubtargetFeature<
          "ermsb", "HasERMSB", "true",
          "REP MOVS/STOS are fast">;
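// With ERMSB, a memcpy can be lowered to the plain byte form (illustrative):
//   movq %rdx, %rcx   # length in bytes
//   rep movsb         # copies from (%rsi) to (%rdi); hardware picks the width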
// Icelake and newer processors have Fast Short REP MOV.
def FeatureFSRM
    : SubtargetFeature<
          "fsrm", "HasFSRM", "true",
          "REP MOVSB of short lengths is faster">;

// Bulldozer and newer processors can merge CMP/TEST (but not other
// instructions) with conditional branches.
def FeatureBranchFusion
    : SubtargetFeature<"branchfusion", "HasBranchFusion", "true",
                       "CMP/TEST can be fused with conditional branches">;

// Sandy Bridge and newer processors have many instructions that can be
// fused with conditional branches and pass through the CPU as a single
// operation.
def FeatureMacroFusion
    : SubtargetFeature<"macrofusion", "HasMacroFusion", "true",
                       "Various instructions can be fused with conditional branches">;
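// For example (illustrative), keeping the compare adjacent to the branch lets
// the pair decode and retire as one fused operation on macro-fusion targets:
//   cmpl %esi, %edi
//   jne  .Lnot_equal       # fused with the preceding cmp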
// Gather is available since Haswell (AVX2 set). So technically, we can
// generate Gathers on all AVX2 processors. But the overhead on HSW is high.
// Skylake Client processor has faster Gathers than HSW and performance is
// similar to Skylake Server (AVX-512).
def FeatureHasFastGather
    : SubtargetFeature<"fast-gather", "HasFastGather", "true",
                       "Indicates if gather is reasonably fast">;

def FeaturePrefer128Bit
    : SubtargetFeature<"prefer-128-bit", "Prefer128Bit", "true",
                       "Prefer 128-bit AVX instructions">;

def FeaturePrefer256Bit
    : SubtargetFeature<"prefer-256-bit", "Prefer256Bit", "true",
                       "Prefer 256-bit AVX instructions">;

def FeaturePreferMaskRegisters
    : SubtargetFeature<"prefer-mask-registers", "PreferMaskRegisters", "true",
                       "Prefer AVX512 mask registers over PTEST/MOVMSK">;

// Lower indirect calls using a special construct called a `retpoline` to
// mitigate potential Spectre v2 attacks against them.
def FeatureRetpolineIndirectCalls
    : SubtargetFeature<
          "retpoline-indirect-calls", "UseRetpolineIndirectCalls", "true",
          "Remove speculation of indirect calls from the generated code">;
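// The retpoline construct replaces `callq *%r11` with a return trampoline
// whose RET is predicted to land in a speculation trap (illustrative sketch):
//   call .Lset_up_target
// .Lcapture_spec:
//   pause
//   lfence
//   jmp .Lcapture_spec     # pens in any speculative execution
// .Lset_up_target:
//   mov %r11, (%rsp)       # smash the return address with the real target
//   ret                    # predicted return goes to the trap above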
// Lower indirect branches and switches either using conditional branch trees
// or using a special construct called a `retpoline` to mitigate potential
// Spectre v2 attacks against them.
def FeatureRetpolineIndirectBranches
    : SubtargetFeature<
          "retpoline-indirect-branches", "UseRetpolineIndirectBranches", "true",
          "Remove speculation of indirect branches from the generated code">;

// Deprecated umbrella feature for enabling both `retpoline-indirect-calls` and
// `retpoline-indirect-branches` above.
def FeatureRetpoline
    : SubtargetFeature<"retpoline", "DeprecatedUseRetpoline", "true",
                       "Remove speculation of indirect branches from the "
                       "generated code, either by avoiding them entirely or "
                       "lowering them with a speculation blocking construct",
                       [FeatureRetpolineIndirectCalls,
                        FeatureRetpolineIndirectBranches]>;

// Rely on external thunks for the emitted retpoline calls. This allows users
|
|
|
|
// to provide their own custom thunk definitions in highly specialized
|
|
|
|
// environments such as a kernel that does boot-time hot patching.
|
|
|
|
def FeatureRetpolineExternalThunk
|
|
|
|
: SubtargetFeature<
|
|
|
|
"retpoline-external-thunk", "UseRetpolineExternalThunk", "true",
|
2018-08-23 14:06:38 +08:00
|
|
|
"When lowering an indirect call or branch using a `retpoline`, rely "
|
|
|
|
"on the specified user provided thunk rather than emitting one "
|
|
|
|
"ourselves. Only has effect when combined with some other retpoline "
|
2019-03-06 10:36:48 +08:00
|
|
|
"feature", [FeatureRetpolineIndirectCalls]>;
|
Introduce the "retpoline" x86 mitigation technique for variant #2 of the speculative execution vulnerabilities disclosed today, specifically identified by CVE-2017-5715, "Branch Target Injection", and is one of the two halves to Spectre..
Summary:
First, we need to explain the core of the vulnerability. Note that this
is a very incomplete description, please see the Project Zero blog post
for details:
https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
The basis for branch target injection is to direct speculative execution
of the processor to some "gadget" of executable code by poisoning the
prediction of indirect branches with the address of that gadget. The
gadget in turn contains an operation that provides a side channel for
reading data. Most commonly, this will look like a load of secret data
followed by a branch on the loaded value and then a load of some
predictable cache line. The attacker then uses timing of the processors
cache to determine which direction the branch took *in the speculative
execution*, and in turn what one bit of the loaded value was. Due to the
nature of these timing side channels and the branch predictor on Intel
processors, this allows an attacker to leak data only accessible to
a privileged domain (like the kernel) back into an unprivileged domain.
The goal is simple: avoid generating code which contains an indirect
branch that could have its prediction poisoned by an attacker. In many
cases, the compiler can simply use directed conditional branches and
a small search tree. LLVM already has support for lowering switches in
this way and the first step of this patch is to disable jump-table
lowering of switches and introduce a pass to rewrite explicit indirectbr
sequences into a switch over integers.
However, there is no fully general alternative to indirect calls. We
introduce a new construct we call a "retpoline" to implement indirect
calls in a non-speculatable way. It can be thought of loosely as
a trampoline for indirect calls which uses the RET instruction on x86.
Further, we arrange for a specific call->ret sequence which ensures the
processor predicts the return to go to a controlled, known location. The
retpoline then "smashes" the return address pushed onto the stack by the
call with the desired target of the original indirect call. The result
is a predicted return to the next instruction after a call (which can be
used to trap speculative execution within an infinite loop) and an
actual indirect branch to an arbitrary address.
On 64-bit x86 ABIs, this is especially easily done in the compiler by
using a guaranteed scratch register to pass the target into this device.
For 32-bit ABIs there isn't a guaranteed scratch register and so several
different retpoline variants are introduced to use a scratch register if
one is available in the calling convention and to otherwise use direct
stack push/pop sequences to pass the target address.
This "retpoline" mitigation is fully described in the following blog
post: https://support.google.com/faqs/answer/7625886
We also support a target feature that disables emission of the retpoline
thunk by the compiler to allow for custom thunks if users want them.
These are particularly useful in environments like kernels that
routinely do hot-patching on boot and want to hot-patch their thunk to
different code sequences. They can write this custom thunk and use
`-mretpoline-external-thunk` *in addition* to `-mretpoline`. In this
case, on x86-64 thu thunk names must be:
```
__llvm_external_retpoline_r11
```
or on 32-bit:
```
__llvm_external_retpoline_eax
__llvm_external_retpoline_ecx
__llvm_external_retpoline_edx
__llvm_external_retpoline_push
```
And the target of the retpoline is passed in the named register, or in
the case of the `push` suffix on the top of the stack via a `pushl`
instruction.
There is one other important source of indirect branches in x86 ELF
binaries: the PLT. These patches also include support for LLD to
generate PLT entries that perform a retpoline-style indirection.
The only other indirect branches remaining that we are aware of are from
precompiled runtimes (such as crt0.o and similar). The ones we have
found are not really attackable, and so we have not focused on them
here, but eventually these runtimes should also be replicated for
retpoline-ed configurations for completeness.
For kernels or other freestanding or fully static executables, the
compiler switch `-mretpoline` is sufficient to fully mitigate this
particular attack. For dynamic executables, you must compile *all*
libraries with `-mretpoline` and additionally link the dynamic
executable and all shared libraries with LLD and pass `-z retpolineplt`
(or use similar functionality from some other linker). We strongly
recommend also using `-z now` as non-lazy binding allows the
retpoline-mitigated PLT to be substantially smaller.
When manually applying transformations similar to `-mretpoline` to the
Linux kernel, we observed very small performance hits for applications
running typical workloads, and relatively minor hits (approximately 2%)
even for extremely syscall-heavy applications. This is largely due to
the small number of indirect branches that occur in
performance-sensitive paths of the kernel.
When using these patches on statically linked applications, especially
C++ applications, you should expect to see a much more dramatic
performance hit. For microbenchmarks that are switch, indirect-, or
virtual-call heavy, we have seen overheads ranging from 10% to 50%.
However, real-world workloads exhibit substantially lower performance
impact. Notably, techniques such as PGO and ThinLTO dramatically reduce
the impact of hot indirect calls (by speculatively promoting them to
direct calls) and allow optimized search trees to be used to lower
switches. If you need to deploy these techniques in C++ applications, we
*strongly* recommend that you ensure all hot call targets are statically
linked (avoiding PLT indirection) and use both PGO and ThinLTO. Well-
tuned servers using all of these techniques saw 5% - 10% overhead from
the use of retpoline.
We will add detailed documentation covering these components in
subsequent patches, but wanted to make the core functionality available
as soon as possible. Happy for more code review, but we'd really like to
get these patches landed and backported ASAP for obvious reasons. We're
planning to backport this to both 6.0 and 5.0 release streams and get
a 5.0 release with just this cherry picked ASAP for distros and vendors.
This patch is the work of a number of people over the past month: Eric, Reid,
Rui, and myself. I'm mailing it out as a single commit due to the time
sensitive nature of landing this and the need to backport it. Huge thanks to
everyone who helped out here, and everyone at Intel who helped out in
discussions about how to craft this. Also, credit goes to Paul Turner (at
Google, but not an LLVM contributor) for much of the underlying retpoline
design.
Reviewers: echristo, rnk, ruiu, craig.topper, DavidKreitzer
Subscribers: sanjoy, emaste, mcrosier, mgorny, mehdi_amini, hiraditya, llvm-commits
Differential Revision: https://reviews.llvm.org/D41723
llvm-svn: 323155
2018-01-23 06:05:25 +08:00
|
|
|
|
2020-04-03 12:59:47 +08:00
|
|
|
// Mitigate LVI attacks against indirect calls/branches and call returns
|
|
|
|
def FeatureLVIControlFlowIntegrity
|
|
|
|
: SubtargetFeature<
|
|
|
|
"lvi-cfi", "UseLVIControlFlowIntegrity", "true",
|
|
|
|
"Prevent indirect calls/branches from using a memory operand, and "
|
|
|
|
"precede all indirect calls/branches from a register with an "
|
|
|
|
"LFENCE instruction to serialize control flow. Also decompose RET "
|
|
|
|
"instructions into a POP+LFENCE+JMP sequence.">;
|
|
|
|
|
2020-05-14 02:25:08 +08:00
|
|
|
// Enable SESES to mitigate speculative execution attacks
|
|
|
|
def FeatureSpeculativeExecutionSideEffectSuppression
|
|
|
|
: SubtargetFeature<
|
|
|
|
"seses", "UseSpeculativeExecutionSideEffectSuppression", "true",
|
|
|
|
"Prevent speculative execution side channel timing attacks by "
|
|
|
|
"inserting a speculation barrier before memory reads, memory writes, "
|
|
|
|
"and conditional branches. Implies LVI Control Flow integrity.",
|
|
|
|
[FeatureLVIControlFlowIntegrity]>;
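As a hand-written illustration (not actual compiler output) of the
barrier placement this feature describes, applied to a trivial 8-byte
copy:
```
/* Illustration of SESES-style barriers on an 8-byte copy; the final
 * POP+LFENCE+JMP is the RET decomposition from the implied lvi-cfi. */
__asm__(".globl seses_sketch\n"
        "seses_sketch:\n"
        "  lfence\n"              /* barrier before the memory read */
        "  movq (%rdi), %rax\n"
        "  lfence\n"              /* barrier before the memory write */
        "  movq %rax, (%rsi)\n"
        "  popq %rcx\n"
        "  lfence\n"
        "  jmpq *%rcx\n");
```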
|
|
|
|
|
2020-05-12 01:25:35 +08:00
|
|
|
// Mitigate LVI attacks against data loads
|
|
|
|
def FeatureLVILoadHardening
|
|
|
|
: SubtargetFeature<
|
|
|
|
"lvi-load-hardening", "UseLVILoadHardening", "true",
|
|
|
|
"Insert LFENCE instructions to prevent data speculatively injected "
|
|
|
|
"into loads from being used maliciously.">;
|
|
|
|
|
2018-05-01 18:01:16 +08:00
|
|
|
// Direct Move instructions.
|
|
|
|
def FeatureMOVDIRI : SubtargetFeature<"movdiri", "HasMOVDIRI", "true",
|
|
|
|
"Support movdiri instruction">;
|
|
|
|
def FeatureMOVDIR64B : SubtargetFeature<"movdir64b", "HasMOVDIR64B", "true",
|
|
|
|
"Support movdir64b instruction">;
|
|
|
|
|
2018-09-30 11:01:46 +08:00
|
|
|
def FeatureFastBEXTR : SubtargetFeature<"fast-bextr", "HasFastBEXTR", "true",
|
|
|
|
"Indicates that the BEXTR instruction is implemented as a single uop "
|
2019-03-06 10:36:48 +08:00
|
|
|
"with good throughput">;
|
2018-09-30 11:01:46 +08:00
|
|
|
|
[x86] add and use fast horizontal vector math subtarget feature
This is the planned follow-up to D52997. Here we are reducing horizontal vector math codegen
by default. AMD Jaguar (btver2) should see no difference from this patch because it has
fast-hops. (If we want to set that bit for other CPUs, let me know.)
The code changes are small, but there are many test diffs. For files that are specifically
testing for hops, I added RUNs to distinguish fast/slow, so we can see the consequences
side-by-side. For files that are primarily concerned with codegen other than hops, I just
updated the CHECK lines to reflect the new default codegen.
To recap the recent horizontal op story:
1. Before rL343727, we were producing hops for all subtargets for a variety of patterns.
Hops were likely not optimal for all targets though.
2. The IR improvement in r343727 exposed a hole in the backend hop pattern matching, so
we reduced hop codegen for all subtargets. That was bad for Jaguar (PR39195).
3. We restored the hop codegen for all targets with rL344141. Good for Jaguar, but
probably bad for other CPUs.
4. This patch allows us to distinguish when we want to produce hops, so everyone can be
happy. I'm not sure if we have the best predicate here, but the intent is to undo the
extra hop-iness that was enabled by r344141.
Differential Revision: https://reviews.llvm.org/D53095
llvm-svn: 344361
2018-10-13 00:41:02 +08:00
|
|
|
// Combine vector math operations with shuffles into horizontal math
|
|
|
|
// instructions if a CPU implements horizontal operations (introduced with
|
|
|
|
// SSE3) with better latency/throughput than the alternative sequence.
|
|
|
|
def FeatureFastHorizontalOps
|
|
|
|
: SubtargetFeature<
|
|
|
|
"fast-hops", "HasFastHorizontalOps", "true",
|
|
|
|
"Prefer horizontal vector math instructions (haddp, phsub, etc.) over "
|
2019-11-02 14:17:23 +08:00
|
|
|
"normal vector instructions with shuffles">;
|
2018-10-13 00:41:02 +08:00
|
|
|
|
2019-05-14 23:21:28 +08:00
|
|
|
def FeatureFastScalarShiftMasks
|
|
|
|
: SubtargetFeature<
|
|
|
|
"fast-scalar-shift-masks", "HasFastScalarShiftMasks", "true",
|
|
|
|
"Prefer a left/right scalar logical shift pair over a shift+and pair">;
|
|
|
|
|
2019-04-26 18:49:13 +08:00
|
|
|
def FeatureFastVectorShiftMasks
|
|
|
|
: SubtargetFeature<
|
|
|
|
"fast-vector-shift-masks", "HasFastVectorShiftMasks", "true",
|
|
|
|
"Prefer a left/right vector logical shift pair over a shift+and pair">;
|
|
|
|
|
2019-12-06 02:24:10 +08:00
|
|
|
def FeatureUseGLMDivSqrtCosts
|
|
|
|
: SubtargetFeature<"use-glm-div-sqrt-costs", "UseGLMDivSqrtCosts", "true",
|
|
|
|
"Use Goldmont specific floating point div/sqrt costs">;
|
|
|
|
|
2019-09-16 22:05:28 +08:00
|
|
|
// Enable use of alias analysis during code generation.
|
|
|
|
def FeatureUseAA : SubtargetFeature<"use-aa", "UseAA", "true",
|
|
|
|
"Use alias analysis during codegen">;
|
|
|
|
|
2019-03-12 06:29:00 +08:00
|
|
|
// Bonnell
|
|
|
|
def ProcIntelAtom : SubtargetFeature<"", "X86ProcFamily", "IntelAtom", "">;
|
|
|
|
// Silvermont
|
|
|
|
def ProcIntelSLM : SubtargetFeature<"", "X86ProcFamily", "IntelSLM", "">;
|
|
|
|
|
2006-10-06 17:17:41 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2017-12-11 01:42:36 +08:00
|
|
|
// Register File Description
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
include "X86RegisterInfo.td"
|
|
|
|
include "X86RegisterBanks.td"
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Instruction Descriptions
|
2006-10-06 17:17:41 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-02-02 07:20:51 +08:00
|
|
|
include "X86Schedule.td"
|
2017-12-11 01:42:36 +08:00
|
|
|
include "X86InstrInfo.td"
|
2018-07-20 00:42:15 +08:00
|
|
|
include "X86SchedPredicates.td"
|
2017-12-11 01:42:36 +08:00
|
|
|
|
|
|
|
def X86InstrInfo : InstrInfo;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2019-03-12 06:29:00 +08:00
|
|
|
// X86 Scheduler Models
|
2017-12-11 01:42:36 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
include "X86ScheduleAtom.td"
|
|
|
|
include "X86SchedSandyBridge.td"
|
|
|
|
include "X86SchedHaswell.td"
|
|
|
|
include "X86SchedBroadwell.td"
|
|
|
|
include "X86ScheduleSLM.td"
|
|
|
|
include "X86ScheduleZnver1.td"
|
2020-01-09 22:41:32 +08:00
|
|
|
include "X86ScheduleZnver2.td"
|
AMD BdVer2 (Piledriver) Initial Scheduler model
Summary:
# Overview
This is somewhat partial.
* Latencies are good {F7371125}
* All of these remaining inconsistencies //appear// to be noise/noisy/flaky.
* NumMicroOps are somewhat good {F7371158}
* Most of the remaining inconsistencies are from `Ld` / `Ld_ReadAfterLd` classes
* Actual unit occupation (pipes, `ResourceCycles`) is undiscovered territory; I did not really look there.
They are basically a verbatim copy from `btver2`.
* Many `InstRW`. And there are still inconsistencies left...
To be noted:
I think this is the first new schedule profile produced with the new next-gen tools like llvm-exegesis!
# Benchmark
I realize that isn't what was suggested, but I'll start with an "internal" public real-world benchmark I understand - [[ https://github.com/darktable-org/rawspeed | RawSpeed raw image decoding library ]].
Diff (the exact clang from trunk without/with this patch):
```
Comparing /home/lebedevri/rawspeed/build-old/src/utilities/rsbench/rsbench to /home/lebedevri/rawspeed/build-new/src/utilities/rsbench/rsbench
Benchmark Time CPU Time Old Time New CPU Old CPU New
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Canon/EOS 5D Mark II/09.canon.sraw1.cr2/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Canon/EOS 5D Mark II/09.canon.sraw1.cr2/threads:8/real_time_mean -0.0607 -0.0604 234 219 233 219
Canon/EOS 5D Mark II/09.canon.sraw1.cr2/threads:8/real_time_median -0.0630 -0.0626 233 219 233 219
Canon/EOS 5D Mark II/09.canon.sraw1.cr2/threads:8/real_time_stddev +0.2581 +0.2587 1 2 1 2
Canon/EOS 5D Mark II/10.canon.sraw2.cr2/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Canon/EOS 5D Mark II/10.canon.sraw2.cr2/threads:8/real_time_mean -0.0770 -0.0767 144 133 144 133
Canon/EOS 5D Mark II/10.canon.sraw2.cr2/threads:8/real_time_median -0.0767 -0.0763 144 133 144 133
Canon/EOS 5D Mark II/10.canon.sraw2.cr2/threads:8/real_time_stddev -0.4170 -0.4156 1 0 1 0
Canon/EOS 5DS/2K4A9927.CR2/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Canon/EOS 5DS/2K4A9927.CR2/threads:8/real_time_mean -0.0271 -0.0270 463 450 463 450
Canon/EOS 5DS/2K4A9927.CR2/threads:8/real_time_median -0.0093 -0.0093 453 449 453 449
Canon/EOS 5DS/2K4A9927.CR2/threads:8/real_time_stddev -0.7280 -0.7280 13 4 13 4
Canon/EOS 5DS/2K4A9928.CR2/threads:8/real_time_pvalue 0.0004 0.0004 U Test, Repetitions: 25 vs 25
Canon/EOS 5DS/2K4A9928.CR2/threads:8/real_time_mean -0.0065 -0.0065 569 565 569 565
Canon/EOS 5DS/2K4A9928.CR2/threads:8/real_time_median -0.0077 -0.0077 569 564 569 564
Canon/EOS 5DS/2K4A9928.CR2/threads:8/real_time_stddev +1.0077 +1.0068 2 5 2 5
Canon/EOS 5DS/2K4A9929.CR2/threads:8/real_time_pvalue 0.0220 0.0199 U Test, Repetitions: 25 vs 25
Canon/EOS 5DS/2K4A9929.CR2/threads:8/real_time_mean +0.0006 +0.0007 312 312 312 312
Canon/EOS 5DS/2K4A9929.CR2/threads:8/real_time_median +0.0031 +0.0032 311 312 311 312
Canon/EOS 5DS/2K4A9929.CR2/threads:8/real_time_stddev -0.7069 -0.7072 4 1 4 1
Canon/EOS 10D/CRW_7673.CRW/threads:8/real_time_pvalue 0.0004 0.0004 U Test, Repetitions: 25 vs 25
Canon/EOS 10D/CRW_7673.CRW/threads:8/real_time_mean -0.0015 -0.0015 141 141 141 141
Canon/EOS 10D/CRW_7673.CRW/threads:8/real_time_median -0.0010 -0.0011 141 141 141 141
Canon/EOS 10D/CRW_7673.CRW/threads:8/real_time_stddev -0.1486 -0.1456 0 0 0 0
Canon/EOS 40D/_MG_0154.CR2/threads:8/real_time_pvalue 0.6139 0.8766 U Test, Repetitions: 25 vs 25
Canon/EOS 40D/_MG_0154.CR2/threads:8/real_time_mean -0.0008 -0.0005 60 60 60 60
Canon/EOS 40D/_MG_0154.CR2/threads:8/real_time_median -0.0006 -0.0002 60 60 60 60
Canon/EOS 40D/_MG_0154.CR2/threads:8/real_time_stddev -0.1467 -0.1390 0 0 0 0
Canon/EOS 77D/IMG_4049.CR2/threads:8/real_time_pvalue 0.0137 0.0137 U Test, Repetitions: 25 vs 25
Canon/EOS 77D/IMG_4049.CR2/threads:8/real_time_mean +0.0002 +0.0002 275 275 275 275
Canon/EOS 77D/IMG_4049.CR2/threads:8/real_time_median -0.0015 -0.0014 275 275 275 275
Canon/EOS 77D/IMG_4049.CR2/threads:8/real_time_stddev +3.3687 +3.3587 0 2 0 2
Canon/PowerShot G1/crw_1693.crw/threads:8/real_time_pvalue 0.4041 0.3933 U Test, Repetitions: 25 vs 25
Canon/PowerShot G1/crw_1693.crw/threads:8/real_time_mean +0.0004 +0.0004 67 67 67 67
Canon/PowerShot G1/crw_1693.crw/threads:8/real_time_median -0.0000 -0.0000 67 67 67 67
Canon/PowerShot G1/crw_1693.crw/threads:8/real_time_stddev +0.1947 +0.1995 0 0 0 0
Fujifilm/GFX 50S/20170525_0037TEST.RAF/threads:8/real_time_pvalue 0.0074 0.0001 U Test, Repetitions: 25 vs 25
Fujifilm/GFX 50S/20170525_0037TEST.RAF/threads:8/real_time_mean -0.0092 +0.0074 547 542 25 25
Fujifilm/GFX 50S/20170525_0037TEST.RAF/threads:8/real_time_median -0.0054 +0.0115 544 541 25 25
Fujifilm/GFX 50S/20170525_0037TEST.RAF/threads:8/real_time_stddev -0.4086 -0.3486 8 5 0 0
Fujifilm/X-Pro2/_DSF3051.RAF/threads:8/real_time_pvalue 0.3320 0.0000 U Test, Repetitions: 25 vs 25
Fujifilm/X-Pro2/_DSF3051.RAF/threads:8/real_time_mean +0.0015 +0.0204 218 218 12 12
Fujifilm/X-Pro2/_DSF3051.RAF/threads:8/real_time_median +0.0001 +0.0203 218 218 12 12
Fujifilm/X-Pro2/_DSF3051.RAF/threads:8/real_time_stddev +0.2259 +0.2023 1 1 0 0
GoPro/HERO6 Black/GOPR9172.GPR/threads:8/real_time_pvalue 0.0000 0.0001 U Test, Repetitions: 25 vs 25
GoPro/HERO6 Black/GOPR9172.GPR/threads:8/real_time_mean -0.0209 -0.0179 96 94 90 88
GoPro/HERO6 Black/GOPR9172.GPR/threads:8/real_time_median -0.0182 -0.0155 95 93 90 88
GoPro/HERO6 Black/GOPR9172.GPR/threads:8/real_time_stddev -0.6164 -0.2703 2 1 2 1
Kodak/DCS Pro 14nx/D7465857.DCR/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Kodak/DCS Pro 14nx/D7465857.DCR/threads:8/real_time_mean -0.0098 -0.0098 176 175 176 175
Kodak/DCS Pro 14nx/D7465857.DCR/threads:8/real_time_median -0.0126 -0.0126 176 174 176 174
Kodak/DCS Pro 14nx/D7465857.DCR/threads:8/real_time_stddev +6.9789 +6.9157 0 2 0 2
Nikon/D850/Nikon-D850-14bit-lossless-compressed.NEF/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Nikon/D850/Nikon-D850-14bit-lossless-compressed.NEF/threads:8/real_time_mean -0.0237 -0.0238 474 463 474 463
Nikon/D850/Nikon-D850-14bit-lossless-compressed.NEF/threads:8/real_time_median -0.0267 -0.0267 473 461 473 461
Nikon/D850/Nikon-D850-14bit-lossless-compressed.NEF/threads:8/real_time_stddev +0.7179 +0.7178 3 5 3 5
Olympus/E-M1MarkII/Olympus_EM1mk2__HIRES_50MP.ORF/threads:8/real_time_pvalue 0.6837 0.6554 U Test, Repetitions: 25 vs 25
Olympus/E-M1MarkII/Olympus_EM1mk2__HIRES_50MP.ORF/threads:8/real_time_mean -0.0014 -0.0013 1375 1373 1375 1373
Olympus/E-M1MarkII/Olympus_EM1mk2__HIRES_50MP.ORF/threads:8/real_time_median +0.0018 +0.0019 1371 1374 1371 1374
Olympus/E-M1MarkII/Olympus_EM1mk2__HIRES_50MP.ORF/threads:8/real_time_stddev -0.7457 -0.7382 11 3 10 3
Panasonic/DC-G9/P1000476.RW2/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Panasonic/DC-G9/P1000476.RW2/threads:8/real_time_mean -0.0080 -0.0289 22 22 10 10
Panasonic/DC-G9/P1000476.RW2/threads:8/real_time_median -0.0070 -0.0287 22 22 10 10
Panasonic/DC-G9/P1000476.RW2/threads:8/real_time_stddev +1.0977 +0.6614 0 0 0 0
Panasonic/DC-GH5/_T012014.RW2/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Panasonic/DC-GH5/_T012014.RW2/threads:8/real_time_mean +0.0132 +0.0967 35 36 10 11
Panasonic/DC-GH5/_T012014.RW2/threads:8/real_time_median +0.0132 +0.0956 35 36 10 11
Panasonic/DC-GH5/_T012014.RW2/threads:8/real_time_stddev -0.0407 -0.1695 0 0 0 0
Panasonic/DC-GH5S/P1022085.RW2/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Panasonic/DC-GH5S/P1022085.RW2/threads:8/real_time_mean +0.0331 +0.1307 13 13 6 6
Panasonic/DC-GH5S/P1022085.RW2/threads:8/real_time_median +0.0430 +0.1373 12 13 6 6
Panasonic/DC-GH5S/P1022085.RW2/threads:8/real_time_stddev -0.9006 -0.8847 1 0 0 0
Pentax/645Z/IMGP2837.PEF/threads:8/real_time_pvalue 0.0016 0.0010 U Test, Repetitions: 25 vs 25
Pentax/645Z/IMGP2837.PEF/threads:8/real_time_mean -0.0023 -0.0024 395 394 395 394
Pentax/645Z/IMGP2837.PEF/threads:8/real_time_median -0.0029 -0.0030 395 394 395 393
Pentax/645Z/IMGP2837.PEF/threads:8/real_time_stddev -0.0275 -0.0375 1 1 1 1
Phase One/P65/CF027310.IIQ/threads:8/real_time_pvalue 0.0232 0.0000 U Test, Repetitions: 25 vs 25
Phase One/P65/CF027310.IIQ/threads:8/real_time_mean -0.0047 +0.0039 114 113 28 28
Phase One/P65/CF027310.IIQ/threads:8/real_time_median -0.0050 +0.0037 114 113 28 28
Phase One/P65/CF027310.IIQ/threads:8/real_time_stddev -0.0599 -0.2683 1 1 0 0
Samsung/NX1/2016-07-23-142101_sam_9364.srw/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Samsung/NX1/2016-07-23-142101_sam_9364.srw/threads:8/real_time_mean +0.0206 +0.0207 405 414 405 414
Samsung/NX1/2016-07-23-142101_sam_9364.srw/threads:8/real_time_median +0.0204 +0.0205 405 414 405 414
Samsung/NX1/2016-07-23-142101_sam_9364.srw/threads:8/real_time_stddev +0.2155 +0.2212 1 1 1 1
Samsung/NX30/2015-03-07-163604_sam_7204.srw/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Samsung/NX30/2015-03-07-163604_sam_7204.srw/threads:8/real_time_mean -0.0109 -0.0108 147 145 147 145
Samsung/NX30/2015-03-07-163604_sam_7204.srw/threads:8/real_time_median -0.0104 -0.0103 147 145 147 145
Samsung/NX30/2015-03-07-163604_sam_7204.srw/threads:8/real_time_stddev -0.4919 -0.4800 0 0 0 0
Samsung/NX3000/_3184416.SRW/threads:8/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 25 vs 25
Samsung/NX3000/_3184416.SRW/threads:8/real_time_mean -0.0149 -0.0147 220 217 220 217
Samsung/NX3000/_3184416.SRW/threads:8/real_time_median -0.0173 -0.0169 221 217 220 217
Samsung/NX3000/_3184416.SRW/threads:8/real_time_stddev +1.0337 +1.0341 1 3 1 3
Sony/DSLR-A350/DSC05472.ARW/threads:8/real_time_pvalue 0.0001 0.0001 U Test, Repetitions: 25 vs 25
Sony/DSLR-A350/DSC05472.ARW/threads:8/real_time_mean -0.0019 -0.0019 194 193 194 193
Sony/DSLR-A350/DSC05472.ARW/threads:8/real_time_median -0.0021 -0.0021 194 193 194 193
Sony/DSLR-A350/DSC05472.ARW/threads:8/real_time_stddev -0.4441 -0.4282 0 0 0 0
Sony/ILCE-7RM2/14-bit-compressed.ARW/threads:8/real_time_pvalue 0.0000 0.4263 U Test, Repetitions: 25 vs 25
Sony/ILCE-7RM2/14-bit-compressed.ARW/threads:8/real_time_mean +0.0258 -0.0006 81 83 19 19
Sony/ILCE-7RM2/14-bit-compressed.ARW/threads:8/real_time_median +0.0235 -0.0011 81 82 19 19
Sony/ILCE-7RM2/14-bit-compressed.ARW/threads:8/real_time_stddev +0.1634 +0.1070 1 1 0 0
```
{F7443905}
If we look at the `_mean`s, the time column, the biggest win is `-7.7%` (`Canon/EOS 5D Mark II/10.canon.sraw2.cr2`),
and the biggest loss is `+3.3%` (`Panasonic/DC-GH5S/P1022085.RW2`);
Overall: mean `-0.7436%`, median `-0.23%`, `cbrt(sum(time^3))` = `-8.73%`
Looks good so far, I'd say.
llvm-exegesis details:
{F7371117} {F7371125}
{F7371128} {F7371144} {F7371158}
Reviewers: craig.topper, RKSimon, andreadb, courbet, avt77, spatel, GGanesh
Reviewed By: andreadb
Subscribers: javed.absar, gbedwell, jfb, llvm-commits
Differential Revision: https://reviews.llvm.org/D52779
llvm-svn: 345463
2018-10-28 04:46:30 +08:00
|
|
|
include "X86ScheduleBdVer2.td"
|
2017-12-11 01:42:36 +08:00
|
|
|
include "X86ScheduleBtVer2.td"
|
|
|
|
include "X86SchedSkylakeClient.td"
|
|
|
|
include "X86SchedSkylakeServer.td"
|
2012-02-02 07:20:51 +08:00
|
|
|
|
2019-03-12 06:29:00 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// X86 Processor Feature Lists
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
def ProcessorFeatures {
|
2020-10-13 00:35:22 +08:00
|
|
|
// x86-64 and x86-64-v[234]
|
|
|
|
list<SubtargetFeature> X86_64V1Features = [
|
|
|
|
FeatureX87, FeatureCMPXCHG8B, FeatureCMOV, FeatureMMX, FeatureSSE2,
|
|
|
|
FeatureFXSR, FeatureNOPL, Feature64Bit
|
|
|
|
];
|
|
|
|
list<SubtargetFeature> X86_64V2Features = !listconcat(
|
|
|
|
X86_64V1Features,
|
|
|
|
[FeatureCMPXCHG16B, FeatureLAHFSAHF, FeaturePOPCNT, FeatureSSE42]);
|
|
|
|
list<SubtargetFeature> X86_64V3Features = !listconcat(X86_64V2Features, [
|
|
|
|
FeatureAVX2, FeatureBMI, FeatureBMI2, FeatureF16C, FeatureFMA, FeatureLZCNT,
|
|
|
|
FeatureMOVBE, FeatureXSAVE
|
|
|
|
]);
|
|
|
|
list<SubtargetFeature> X86_64V4Features = !listconcat(X86_64V3Features, [
|
|
|
|
FeatureBWI,
|
|
|
|
FeatureCDI,
|
|
|
|
FeatureDQI,
|
|
|
|
FeatureVLX,
|
|
|
|
]);
|
|
|
|
|
2019-03-13 00:35:30 +08:00
|
|
|
// Nehalem
|
2020-10-13 00:35:22 +08:00
|
|
|
list<SubtargetFeature> NHMFeatures = X86_64V2Features;
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> NHMTuning = [FeatureMacroFusion,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Westmere
|
|
|
|
list<SubtargetFeature> WSMAdditionalFeatures = [FeaturePCLMUL];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> WSMTuning = NHMTuning;
|
2019-03-13 00:35:30 +08:00
|
|
|
list<SubtargetFeature> WSMFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(NHMFeatures, WSMAdditionalFeatures);
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Sandybridge
|
|
|
|
list<SubtargetFeature> SNBAdditionalFeatures = [FeatureAVX,
|
|
|
|
FeatureXSAVE,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureXSAVEOPT];
|
|
|
|
list<SubtargetFeature> SNBTuning = [FeatureMacroFusion,
|
|
|
|
FeatureSlow3OpsLEA,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureSlowUAMem32,
|
|
|
|
FeatureFastScalarFSQRT,
|
|
|
|
FeatureFastSHLDRotate,
|
|
|
|
FeatureFast15ByteNOP,
|
|
|
|
FeaturePOPCNTFalseDeps,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> SNBFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(WSMFeatures, SNBAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Ivybridge
|
|
|
|
list<SubtargetFeature> IVBAdditionalFeatures = [FeatureRDRAND,
|
|
|
|
FeatureF16C,
|
|
|
|
FeatureFSGSBase];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> IVBTuning = SNBTuning;
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> IVBFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(SNBFeatures, IVBAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Haswell
|
|
|
|
list<SubtargetFeature> HSWAdditionalFeatures = [FeatureAVX2,
|
|
|
|
FeatureBMI,
|
|
|
|
FeatureBMI2,
|
|
|
|
FeatureERMSB,
|
|
|
|
FeatureFMA,
|
|
|
|
FeatureINVPCID,
|
|
|
|
FeatureLZCNT,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureMOVBE];
|
|
|
|
list<SubtargetFeature> HSWTuning = [FeatureMacroFusion,
|
|
|
|
FeatureSlow3OpsLEA,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureFastScalarFSQRT,
|
|
|
|
FeatureFastSHLDRotate,
|
|
|
|
FeatureFast15ByteNOP,
|
|
|
|
FeatureFastVariableShuffle,
|
|
|
|
FeaturePOPCNTFalseDeps,
|
|
|
|
FeatureLZCNTFalseDeps,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> HSWFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(IVBFeatures, HSWAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Broadwell
|
|
|
|
list<SubtargetFeature> BDWAdditionalFeatures = [FeatureADX,
|
|
|
|
FeatureRDSEED,
|
|
|
|
FeaturePRFCHW];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BDWTuning = HSWTuning;
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> BDWFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(HSWFeatures, BDWAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Skylake
|
|
|
|
list<SubtargetFeature> SKLAdditionalFeatures = [FeatureAES,
|
|
|
|
FeatureXSAVEC,
|
|
|
|
FeatureXSAVES,
|
|
|
|
FeatureCLFLUSHOPT,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureSGX];
|
|
|
|
list<SubtargetFeature> SKLTuning = [FeatureHasFastGather,
|
|
|
|
FeatureMacroFusion,
|
|
|
|
FeatureSlow3OpsLEA,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureFastScalarFSQRT,
|
|
|
|
FeatureFastVectorFSQRT,
|
|
|
|
FeatureFastSHLDRotate,
|
|
|
|
FeatureFast15ByteNOP,
|
|
|
|
FeatureFastVariableShuffle,
|
|
|
|
FeaturePOPCNTFalseDeps,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> SKLFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(BDWFeatures, SKLAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Skylake-AVX512
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> SKXAdditionalFeatures = [FeatureAES,
|
|
|
|
FeatureXSAVEC,
|
|
|
|
FeatureXSAVES,
|
|
|
|
FeatureCLFLUSHOPT,
|
|
|
|
FeatureAVX512,
|
2019-03-12 06:29:00 +08:00
|
|
|
FeatureCDI,
|
|
|
|
FeatureDQI,
|
|
|
|
FeatureBWI,
|
|
|
|
FeatureVLX,
|
|
|
|
FeaturePKU,
|
|
|
|
FeatureCLWB];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> SKXTuning = [FeatureHasFastGather,
|
|
|
|
FeatureMacroFusion,
|
|
|
|
FeatureSlow3OpsLEA,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureFastScalarFSQRT,
|
|
|
|
FeatureFastVectorFSQRT,
|
|
|
|
FeatureFastSHLDRotate,
|
|
|
|
FeatureFast15ByteNOP,
|
|
|
|
FeatureFastVariableShuffle,
|
|
|
|
FeaturePrefer256Bit,
|
|
|
|
FeaturePOPCNTFalseDeps,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> SKXFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(BDWFeatures, SKXAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Cascadelake
|
|
|
|
list<SubtargetFeature> CLXAdditionalFeatures = [FeatureVNNI];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> CLXTuning = SKXTuning;
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> CLXFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(SKXFeatures, CLXAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
2019-06-07 16:31:35 +08:00
|
|
|
// Cooperlake
|
|
|
|
list<SubtargetFeature> CPXAdditionalFeatures = [FeatureBF16];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> CPXTuning = SKXTuning;
|
2019-06-07 16:31:35 +08:00
|
|
|
list<SubtargetFeature> CPXFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(CLXFeatures, CPXAdditionalFeatures);
|
2019-06-07 16:31:35 +08:00
|
|
|
|
2019-03-12 06:29:00 +08:00
|
|
|
// Cannonlake
|
|
|
|
list<SubtargetFeature> CNLAdditionalFeatures = [FeatureAVX512,
|
|
|
|
FeatureCDI,
|
|
|
|
FeatureDQI,
|
|
|
|
FeatureBWI,
|
|
|
|
FeatureVLX,
|
|
|
|
FeaturePKU,
|
|
|
|
FeatureVBMI,
|
|
|
|
FeatureIFMA,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureSHA];
|
|
|
|
list<SubtargetFeature> CNLTuning = [FeatureHasFastGather,
|
|
|
|
FeatureMacroFusion,
|
|
|
|
FeatureSlow3OpsLEA,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureFastScalarFSQRT,
|
|
|
|
FeatureFastVectorFSQRT,
|
|
|
|
FeatureFastSHLDRotate,
|
|
|
|
FeatureFast15ByteNOP,
|
|
|
|
FeatureFastVariableShuffle,
|
|
|
|
FeaturePrefer256Bit,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> CNLFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(SKLFeatures, CNLAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Icelake
|
|
|
|
list<SubtargetFeature> ICLAdditionalFeatures = [FeatureBITALG,
|
|
|
|
FeatureVAES,
|
|
|
|
FeatureVBMI2,
|
|
|
|
FeatureVNNI,
|
|
|
|
FeatureVPCLMULQDQ,
|
|
|
|
FeatureVPOPCNTDQ,
|
|
|
|
FeatureGFNI,
|
|
|
|
FeatureCLWB,
|
2020-08-15 03:20:08 +08:00
|
|
|
FeatureRDPID,
|
|
|
|
FeatureFSRM];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> ICLTuning = CNLTuning;
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> ICLFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(CNLFeatures, ICLAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Icelake Server
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> ICXAdditionalFeatures = [FeaturePCONFIG,
|
|
|
|
FeatureWBNOINVD];
|
|
|
|
list<SubtargetFeature> ICXTuning = CNLTuning;
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> ICXFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(ICLFeatures, ICXAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
2019-08-12 09:29:46 +08:00
|
|
|
// Tigerlake
|
|
|
|
list<SubtargetFeature> TGLAdditionalFeatures = [FeatureVP2INTERSECT,
|
|
|
|
FeatureMOVDIRI,
|
|
|
|
FeatureMOVDIR64B,
|
|
|
|
FeatureSHSTK];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> TGLTuning = CNLTuning;
|
2019-08-12 09:29:46 +08:00
|
|
|
list<SubtargetFeature> TGLFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(ICLFeatures, TGLAdditionalFeatures);
|
2019-08-12 09:29:46 +08:00
|
|
|
|
[X86] Support -march=sapphirerapids
Support -march=sapphirerapids for x86.
Compared with Icelake Server, it includes 14 new features. They are
amxtile, amxint8, amxbf16, avx512bf16, avx512vp2intersect, cldemote,
enqcmd, movdir64b, movdiri, ptwrite, serialize, shstk, tsxldtrk, waitpkg.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D86503
2020-08-25 12:27:02 +08:00
|
|
|
// Sapphirerapids
|
|
|
|
list<SubtargetFeature> SPRAdditionalFeatures = [FeatureAMXTILE,
|
|
|
|
FeatureAMXINT8,
|
|
|
|
FeatureAMXBF16,
|
|
|
|
FeatureBF16,
|
|
|
|
FeatureSERIALIZE,
|
|
|
|
FeatureCLDEMOTE,
|
|
|
|
FeatureWAITPKG,
|
|
|
|
FeaturePTWRITE,
|
|
|
|
FeatureTSXLDTRK,
|
|
|
|
FeatureENQCMD,
|
|
|
|
FeatureSHSTK,
|
|
|
|
FeatureVP2INTERSECT,
|
|
|
|
FeatureMOVDIRI,
|
|
|
|
FeatureMOVDIR64B];
|
|
|
|
list<SubtargetFeature> SPRTuning = ICXTuning;
|
|
|
|
list<SubtargetFeature> SPRFeatures =
|
|
|
|
!listconcat(ICXFeatures, SPRAdditionalFeatures);
|
|
|
|
|
2019-03-13 00:35:30 +08:00
|
|
|
// Atom
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> AtomFeatures = [FeatureX87,
|
|
|
|
FeatureCMPXCHG8B,
|
|
|
|
FeatureCMOV,
|
|
|
|
FeatureMMX,
|
|
|
|
FeatureSSSE3,
|
|
|
|
FeatureFXSR,
|
|
|
|
FeatureNOPL,
|
|
|
|
Feature64Bit,
|
|
|
|
FeatureCMPXCHG16B,
|
|
|
|
FeatureMOVBE,
|
|
|
|
FeatureLAHFSAHF];
|
|
|
|
list<SubtargetFeature> AtomTuning = [ProcIntelAtom,
|
|
|
|
FeatureSlowUAMem16,
|
|
|
|
FeatureLEAForSP,
|
|
|
|
FeatureSlowDivide32,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureSlowTwoMemOps,
|
|
|
|
FeatureLEAUsesAG,
|
|
|
|
FeaturePadShortFunctions,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Silvermont
|
|
|
|
list<SubtargetFeature> SLMAdditionalFeatures = [FeatureSSE42,
|
|
|
|
FeaturePOPCNT,
|
|
|
|
FeaturePCLMUL,
|
|
|
|
FeaturePRFCHW,
|
|
|
|
FeatureRDRAND];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> SLMTuning = [ProcIntelSLM,
|
|
|
|
FeatureSlowTwoMemOps,
|
|
|
|
FeatureSlowLEA,
|
|
|
|
FeatureSlowIncDec,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureSlowPMULLD,
|
|
|
|
FeatureFast7ByteNOP,
|
|
|
|
FeaturePOPCNTFalseDeps,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-13 00:35:30 +08:00
|
|
|
list<SubtargetFeature> SLMFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(AtomFeatures, SLMAdditionalFeatures);
|
2019-03-13 00:35:30 +08:00
|
|
|
|
2019-03-12 06:29:00 +08:00
|
|
|
// Goldmont
|
2019-03-13 00:35:30 +08:00
|
|
|
list<SubtargetFeature> GLMAdditionalFeatures = [FeatureAES,
|
|
|
|
FeatureSHA,
|
|
|
|
FeatureRDSEED,
|
|
|
|
FeatureXSAVE,
|
|
|
|
FeatureXSAVEOPT,
|
|
|
|
FeatureXSAVEC,
|
|
|
|
FeatureXSAVES,
|
|
|
|
FeatureCLFLUSHOPT,
|
|
|
|
FeatureFSGSBase];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> GLMTuning = [FeatureUseGLMDivSqrtCosts,
|
|
|
|
FeatureSlowTwoMemOps,
|
|
|
|
FeatureSlowLEA,
|
|
|
|
FeatureSlowIncDec,
|
|
|
|
FeaturePOPCNTFalseDeps,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> GLMFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(SLMFeatures, GLMAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Goldmont Plus
|
|
|
|
list<SubtargetFeature> GLPAdditionalFeatures = [FeaturePTWRITE,
|
|
|
|
FeatureRDPID,
|
|
|
|
FeatureSGX];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> GLPTuning = [FeatureUseGLMDivSqrtCosts,
|
|
|
|
FeatureSlowTwoMemOps,
|
|
|
|
FeatureSlowLEA,
|
|
|
|
FeatureSlowIncDec,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> GLPFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(GLMFeatures, GLPAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Tremont
|
2020-06-03 13:21:12 +08:00
|
|
|
list<SubtargetFeature> TRMAdditionalFeatures = [FeatureCLWB,
|
|
|
|
FeatureGFNI];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> TRMTuning = GLPTuning;
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> TRMFeatures =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(GLPFeatures, TRMAdditionalFeatures);
|
2019-03-12 06:29:00 +08:00
|
|
|
|
|
|
|
// Knights Landing
|
|
|
|
list<SubtargetFeature> KNLFeatures = [FeatureX87,
|
2019-03-21 07:35:49 +08:00
|
|
|
FeatureCMPXCHG8B,
|
2019-03-12 06:29:00 +08:00
|
|
|
FeatureCMOV,
|
|
|
|
FeatureMMX,
|
|
|
|
FeatureFXSR,
|
|
|
|
FeatureNOPL,
|
|
|
|
Feature64Bit,
|
|
|
|
FeatureCMPXCHG16B,
|
|
|
|
FeaturePOPCNT,
|
|
|
|
FeaturePCLMUL,
|
|
|
|
FeatureXSAVE,
|
|
|
|
FeatureXSAVEOPT,
|
|
|
|
FeatureLAHFSAHF,
|
|
|
|
FeatureAES,
|
|
|
|
FeatureRDRAND,
|
|
|
|
FeatureF16C,
|
|
|
|
FeatureFSGSBase,
|
|
|
|
FeatureAVX512,
|
|
|
|
FeatureERI,
|
|
|
|
FeatureCDI,
|
|
|
|
FeaturePFI,
|
|
|
|
FeaturePREFETCHWT1,
|
|
|
|
FeatureADX,
|
|
|
|
FeatureRDSEED,
|
|
|
|
FeatureMOVBE,
|
|
|
|
FeatureLZCNT,
|
|
|
|
FeatureBMI,
|
|
|
|
FeatureBMI2,
|
|
|
|
FeatureFMA,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeaturePRFCHW];
|
|
|
|
list<SubtargetFeature> KNLTuning = [FeatureSlowDivide64,
|
|
|
|
FeatureSlow3OpsLEA,
|
|
|
|
FeatureSlowIncDec,
|
|
|
|
FeatureSlowTwoMemOps,
|
|
|
|
FeaturePreferMaskRegisters,
|
|
|
|
FeatureHasFastGather,
|
|
|
|
FeatureSlowPMADDWD];
|
2019-03-12 06:29:00 +08:00
|
|
|
// TODO Add AVX5124FMAPS/AVX5124VNNIW features
|
|
|
|
list<SubtargetFeature> KNMFeatures =
|
|
|
|
!listconcat(KNLFeatures, [FeatureVPOPCNTDQ]);
|
|
|
|
|
2019-08-07 01:04:02 +08:00
|
|
|
// Barcelona
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BarcelonaFeatures = [FeatureX87,
|
|
|
|
FeatureCMPXCHG8B,
|
|
|
|
FeatureSSE4A,
|
|
|
|
Feature3DNowA,
|
|
|
|
FeatureFXSR,
|
|
|
|
FeatureNOPL,
|
|
|
|
FeatureCMPXCHG16B,
|
|
|
|
FeaturePRFCHW,
|
|
|
|
FeatureLZCNT,
|
|
|
|
FeaturePOPCNT,
|
|
|
|
FeatureLAHFSAHF,
|
|
|
|
FeatureCMOV,
|
|
|
|
Feature64Bit];
|
|
|
|
list<SubtargetFeature> BarcelonaTuning = [FeatureFastScalarShiftMasks,
|
|
|
|
FeatureSlowSHLD,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Bobcat
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BtVer1Features = [FeatureX87,
|
|
|
|
FeatureCMPXCHG8B,
|
|
|
|
FeatureCMOV,
|
|
|
|
FeatureMMX,
|
|
|
|
FeatureSSSE3,
|
|
|
|
FeatureSSE4A,
|
|
|
|
FeatureFXSR,
|
|
|
|
FeatureNOPL,
|
|
|
|
Feature64Bit,
|
|
|
|
FeatureCMPXCHG16B,
|
|
|
|
FeaturePRFCHW,
|
|
|
|
FeatureLZCNT,
|
|
|
|
FeaturePOPCNT,
|
|
|
|
FeatureLAHFSAHF];
|
|
|
|
list<SubtargetFeature> BtVer1Tuning = [FeatureFast15ByteNOP,
|
|
|
|
FeatureFastScalarShiftMasks,
|
|
|
|
FeatureFastVectorShiftMasks,
|
|
|
|
FeatureSlowSHLD,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Jaguar
|
|
|
|
list<SubtargetFeature> BtVer2AdditionalFeatures = [FeatureAVX,
|
|
|
|
FeatureAES,
|
|
|
|
FeaturePCLMUL,
|
|
|
|
FeatureBMI,
|
|
|
|
FeatureF16C,
|
|
|
|
FeatureMOVBE,
|
|
|
|
FeatureXSAVE,
|
|
|
|
FeatureXSAVEOPT];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BtVer2Tuning = [FeatureFastLZCNT,
|
|
|
|
FeatureFastBEXTR,
|
|
|
|
FeatureFastHorizontalOps,
|
|
|
|
FeatureFast15ByteNOP,
|
|
|
|
FeatureFastScalarShiftMasks,
|
|
|
|
FeatureFastVectorShiftMasks,
|
|
|
|
FeatureSlowSHLD];
|
2019-03-13 00:35:30 +08:00
|
|
|
list<SubtargetFeature> BtVer2Features =
|
2020-07-31 08:05:06 +08:00
|
|
|
!listconcat(BtVer1Features, BtVer2AdditionalFeatures);
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Bulldozer
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BdVer1Features = [FeatureX87,
|
|
|
|
FeatureCMPXCHG8B,
|
|
|
|
FeatureCMOV,
|
|
|
|
FeatureXOP,
|
|
|
|
Feature64Bit,
|
|
|
|
FeatureCMPXCHG16B,
|
|
|
|
FeatureAES,
|
|
|
|
FeaturePRFCHW,
|
|
|
|
FeaturePCLMUL,
|
|
|
|
FeatureMMX,
|
|
|
|
FeatureFXSR,
|
|
|
|
FeatureNOPL,
|
|
|
|
FeatureLZCNT,
|
|
|
|
FeaturePOPCNT,
|
|
|
|
FeatureXSAVE,
|
|
|
|
FeatureLWP,
|
|
|
|
FeatureLAHFSAHF];
|
|
|
|
list<SubtargetFeature> BdVer1Tuning = [FeatureSlowSHLD,
|
|
|
|
FeatureFast11ByteNOP,
|
|
|
|
FeatureFastScalarShiftMasks,
|
|
|
|
FeatureBranchFusion,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// PileDriver
|
|
|
|
list<SubtargetFeature> BdVer2AdditionalFeatures = [FeatureF16C,
|
|
|
|
FeatureBMI,
|
|
|
|
FeatureTBM,
|
|
|
|
FeatureFMA,
|
|
|
|
FeatureFastBEXTR];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BdVer2Tuning = BdVer1Tuning;
|
|
|
|
list<SubtargetFeature> BdVer2Features =
|
|
|
|
!listconcat(BdVer1Features, BdVer2AdditionalFeatures);
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Steamroller
|
|
|
|
list<SubtargetFeature> BdVer3AdditionalFeatures = [FeatureXSAVEOPT,
|
|
|
|
FeatureFSGSBase];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BdVer3Tuning = BdVer2Tuning;
|
|
|
|
list<SubtargetFeature> BdVer3Features =
|
|
|
|
!listconcat(BdVer2Features, BdVer3AdditionalFeatures);
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
// Excavator
|
|
|
|
list<SubtargetFeature> BdVer4AdditionalFeatures = [FeatureAVX2,
|
|
|
|
FeatureBMI2,
|
2020-06-27 14:32:17 +08:00
|
|
|
FeatureMOVBE,
|
|
|
|
FeatureRDRAND,
|
2019-03-13 00:35:30 +08:00
|
|
|
FeatureMWAITX];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> BdVer4Tuning = BdVer3Tuning;
|
|
|
|
list<SubtargetFeature> BdVer4Features =
|
|
|
|
!listconcat(BdVer3Features, BdVer4AdditionalFeatures);
|
2019-03-13 00:35:30 +08:00
|
|
|
|
|
|
|
|
2019-03-12 06:29:00 +08:00
|
|
|
// AMD Zen Processors common ISAs
|
|
|
|
list<SubtargetFeature> ZNFeatures = [FeatureADX,
|
|
|
|
FeatureAES,
|
|
|
|
FeatureAVX2,
|
|
|
|
FeatureBMI,
|
|
|
|
FeatureBMI2,
|
|
|
|
FeatureCLFLUSHOPT,
|
|
|
|
FeatureCLZERO,
|
|
|
|
FeatureCMOV,
|
|
|
|
Feature64Bit,
|
|
|
|
FeatureCMPXCHG16B,
|
|
|
|
FeatureF16C,
|
|
|
|
FeatureFMA,
|
|
|
|
FeatureFSGSBase,
|
|
|
|
FeatureFXSR,
|
|
|
|
FeatureNOPL,
|
|
|
|
FeatureLAHFSAHF,
|
|
|
|
FeatureLZCNT,
|
|
|
|
FeatureMMX,
|
|
|
|
FeatureMOVBE,
|
|
|
|
FeatureMWAITX,
|
|
|
|
FeaturePCLMUL,
|
|
|
|
FeaturePOPCNT,
|
|
|
|
FeaturePRFCHW,
|
|
|
|
FeatureRDRAND,
|
|
|
|
FeatureRDSEED,
|
|
|
|
FeatureSHA,
|
|
|
|
FeatureSSE4A,
|
|
|
|
FeatureX87,
|
|
|
|
FeatureXSAVE,
|
|
|
|
FeatureXSAVEC,
|
|
|
|
FeatureXSAVEOPT,
|
|
|
|
FeatureXSAVES];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> ZNTuning = [FeatureFastLZCNT,
|
|
|
|
FeatureFastBEXTR,
|
|
|
|
FeatureFast15ByteNOP,
|
|
|
|
FeatureBranchFusion,
|
|
|
|
FeatureFastScalarShiftMasks,
|
|
|
|
FeatureSlowSHLD,
|
|
|
|
FeatureInsertVZEROUPPER];
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> ZN2AdditionalFeatures = [FeatureCLWB,
|
|
|
|
FeatureRDPID,
|
|
|
|
FeatureWBNOINVD];
|
2020-07-31 08:05:06 +08:00
|
|
|
list<SubtargetFeature> ZN2Tuning = ZNTuning;
|
2019-03-12 06:29:00 +08:00
|
|
|
list<SubtargetFeature> ZN2Features =
|
|
|
|
!listconcat(ZNFeatures, ZN2AdditionalFeatures);
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// X86 processors supported.
|
|
|
|
//===----------------------------------------------------------------------===//
|
2012-02-02 07:20:51 +08:00
|
|
|
|
2020-07-31 08:05:06 +08:00
|
|
|
class Proc<string Name, list<SubtargetFeature> Features,
|
|
|
|
list<SubtargetFeature> TuneFeatures>
|
2020-08-15 05:56:54 +08:00
|
|
|
: ProcessorModel<Name, GenericModel, Features, TuneFeatures>;
|
2020-07-31 08:05:06 +08:00
|
|
|
|
|
|
|
class ProcModel<string Name, SchedMachineModel Model,
|
|
|
|
list<SubtargetFeature> Features,
|
|
|
|
list<SubtargetFeature> TuneFeatures>
|
2020-08-15 05:56:54 +08:00
|
|
|
: ProcessorModel<Name, Model, Features, TuneFeatures>;
|
2012-02-02 07:20:51 +08:00
|
|
|
|
2020-04-23 13:26:07 +08:00
|
|
|
// NOTE: CMPXCHG8B is here for legacy compatibility so that it is only disabled
|
2019-03-21 07:35:49 +08:00
|
|
|
// if i386/i486 is specifically requested.
|
2020-07-23 23:52:06 +08:00
|
|
|
// NOTE: 64Bit is here as "generic" is the default llc CPU. The X86Subtarget
|
|
|
|
// constructor checks that any CPU used in 64-bit mode has Feature64Bit enabled.
|
|
|
|
// It has no effect on code generation.
|
2020-08-25 01:51:56 +08:00
|
|
|
def : ProcModel<"generic", SandyBridgeModel,
|
|
|
|
[FeatureX87, FeatureCMPXCHG8B, Feature64Bit],
|
|
|
|
[FeatureSlow3OpsLEA,
|
|
|
|
FeatureSlowDivide64,
|
|
|
|
FeatureSlowIncDec,
|
|
|
|
FeatureMacroFusion,
|
|
|
|
FeatureInsertVZEROUPPER]>;
|
|
|
|
|
2020-07-31 08:05:06 +08:00
|
|
|
def : Proc<"i386", [FeatureX87],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
def : Proc<"i486", [FeatureX87],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
def : Proc<"i586", [FeatureX87, FeatureCMPXCHG8B],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
def : Proc<"pentium", [FeatureX87, FeatureCMPXCHG8B],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
def : Proc<"pentium-mmx", [FeatureX87, FeatureCMPXCHG8B, FeatureMMX],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
|
|
|
|
def : Proc<"i686", [FeatureX87, FeatureCMPXCHG8B, FeatureCMOV],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
def : Proc<"pentiumpro", [FeatureX87, FeatureCMPXCHG8B, FeatureCMOV,
|
|
|
|
FeatureNOPL],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
|
|
|
|
def : Proc<"pentium2", [FeatureX87, FeatureCMPXCHG8B, FeatureMMX, FeatureCMOV,
|
|
|
|
FeatureFXSR, FeatureNOPL],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
2017-11-02 06:15:49 +08:00
|
|
|
|
|
|
|
foreach P = ["pentium3", "pentium3m"] in {
|
2020-07-31 08:05:06 +08:00
|
|
|
def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureMMX,
|
|
|
|
FeatureSSE1, FeatureFXSR, FeatureNOPL, FeatureCMOV],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
2017-11-02 06:15:49 +08:00
|
|
|
}
|
2016-04-28 06:52:35 +08:00
|
|
|
|
|
|
|
// Enable the PostRAScheduler for SSE2 and SSE3 class cpus.
|
|
|
|
// The intent is to enable it for pentium4 which is the current default
|
|
|
|
// processor in a vanilla 32-bit clang compilation when no specific
|
|
|
|
// architecture is specified. This generally gives a nice performance
|
|
|
|
// increase on silvermont, with largely neutral behavior on other
|
|
|
|
// contemporary large core processors.
|
|
|
|
// pentium-m, pentium4m, prescott and nocona are included as a preventative
|
|
|
|
// measure to avoid performance surprises, in case clang's default cpu
|
|
|
|
// changes slightly.
|
|
|
|
|
2020-07-31 08:05:06 +08:00
|
|
|
def : ProcModel<"pentium-m", GenericPostRAModel,
|
|
|
|
[FeatureX87, FeatureCMPXCHG8B, FeatureMMX, FeatureSSE2,
|
|
|
|
FeatureFXSR, FeatureNOPL, FeatureCMOV],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
2016-04-28 06:52:35 +08:00
|
|
|
|
2017-11-02 06:15:49 +08:00
|
|
|
foreach P = ["pentium4", "pentium4m"] in {
|
2020-08-27 06:21:55 +08:00
|
|
|
def : ProcModel<P, GenericPostRAModel,
|
2020-07-31 08:05:06 +08:00
|
|
|
[FeatureX87, FeatureCMPXCHG8B, FeatureMMX, FeatureSSE2,
|
|
|
|
FeatureFXSR, FeatureNOPL, FeatureCMOV],
|
2020-08-27 06:21:55 +08:00
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
2017-11-02 06:15:49 +08:00
|
|
|
}
|
2014-05-08 01:37:03 +08:00
|
|
|
|
2016-04-01 18:16:15 +08:00
|
|
|
// Intel Quark.
|
2020-07-31 08:05:06 +08:00
|
|
|
def : Proc<"lakemont", [FeatureCMPXCHG8B],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
2016-04-01 18:16:15 +08:00
|
|
|
|
2013-03-27 06:19:12 +08:00
|
|
|
// Intel Core Duo.
|
2020-07-31 08:05:06 +08:00
|
|
|
def : ProcModel<"yonah", SandyBridgeModel,
|
|
|
|
[FeatureX87, FeatureCMPXCHG8B, FeatureMMX, FeatureSSE3,
|
|
|
|
FeatureFXSR, FeatureNOPL, FeatureCMOV],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
2013-03-27 06:19:12 +08:00
|
|
|
|
|
|
|
// NetBurst.
|
2020-07-31 08:05:06 +08:00
|
|
|
def : ProcModel<"prescott", GenericPostRAModel,
|
|
|
|
[FeatureX87, FeatureCMPXCHG8B, FeatureMMX, FeatureSSE3,
|
|
|
|
FeatureFXSR, FeatureNOPL, FeatureCMOV],
|
|
|
|
[FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
|
|
|
|
def : ProcModel<"nocona", GenericPostRAModel, [
|
2016-03-23 19:13:54 +08:00
|
|
|
FeatureX87,
|
2019-03-21 07:35:49 +08:00
|
|
|
FeatureCMPXCHG8B,
|
2018-08-27 02:29:33 +08:00
|
|
|
FeatureCMOV,
|
Move the MMX subtarget feature out of the SSE set of features and into
its own variable.
This is needed so that we can explicitly turn off MMX without turning
off SSE and also so that we can diagnose feature set incompatibilities
that involve MMX without SSE.
Rationale:
// sse3
__m128d test_mm_addsub_pd(__m128d A, __m128d B) {
return _mm_addsub_pd(A, B);
}
// mmx
void shift(__m64 a, __m64 b, int c) {
_mm_slli_pi16(a, c);
_mm_slli_pi32(a, c);
_mm_slli_si64(a, c);
_mm_srli_pi16(a, c);
_mm_srli_pi32(a, c);
_mm_srli_si64(a, c);
_mm_srai_pi16(a, c);
_mm_srai_pi32(a, c);
}
clang -msse3 -mno-mmx file.c -c
For this code we should be able to explicitly turn off MMX
without affecting the compilation of the SSE3 function and then
diagnose and error on compiling the MMX function.
This matches the existing gcc behavior and follows the spirit of
the SSE/MMX separation in llvm where we can (and do) turn off
MMX code generation except in the presence of intrinsics.
Updated a couple of tests, but primarily tested with a couple of tests
for turning on only mmx and only sse.
This is paired with a patch to clang to take advantage of this behavior.
llvm-svn: 249731
2015-10-09 04:10:06 +08:00
|
|
|
FeatureMMX,
|
|
|
|
FeatureSSE3,
|
2015-10-16 14:03:09 +08:00
|
|
|
FeatureFXSR,
|
2018-01-11 06:07:16 +08:00
|
|
|
FeatureNOPL,
|
2018-08-30 14:01:05 +08:00
|
|
|
Feature64Bit,
|
2019-11-05 02:20:00 +08:00
|
|
|
FeatureCMPXCHG16B,
|
2020-07-31 08:05:06 +08:00
|
|
|
],
|
|
|
|
[
|
|
|
|
FeatureSlowUAMem16,
|
2019-11-05 02:20:00 +08:00
|
|
|
FeatureInsertVZEROUPPER
|
2015-10-09 04:10:06 +08:00
|
|
|
]>;
|
2013-03-27 06:19:12 +08:00
|
|
|
|
|
|
|
// Intel Core 2 Solo/Duo.
|
2020-07-31 08:05:06 +08:00
|
|
|
def : ProcModel<"core2", SandyBridgeModel, [
|
2016-03-23 19:13:54 +08:00
|
|
|
FeatureX87,
|
2019-03-21 07:35:49 +08:00
|
|
|
FeatureCMPXCHG8B,
|
2018-08-27 02:29:33 +08:00
|
|
|
FeatureCMOV,
|
2015-10-09 04:10:06 +08:00
|
|
|
FeatureMMX,
|
|
|
|
FeatureSSSE3,
|
2015-10-16 14:03:09 +08:00
|
|
|
FeatureFXSR,
|
2018-01-11 06:07:16 +08:00
|
|
|
FeatureNOPL,
|
2018-08-30 14:01:05 +08:00
|
|
|
Feature64Bit,
|
2015-10-09 04:10:06 +08:00
|
|
|
FeatureCMPXCHG16B,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureLAHFSAHF
|
|
|
|
],
|
|
|
|
[
|
2019-11-05 02:20:00 +08:00
|
|
|
FeatureMacroFusion,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureSlowUAMem16,
|
2019-11-05 02:20:00 +08:00
|
|
|
FeatureInsertVZEROUPPER
|
2015-10-09 04:10:06 +08:00
|
|
|
]>;
|
2020-07-31 08:05:06 +08:00
|
|
|
def : ProcModel<"penryn", SandyBridgeModel, [
|
2016-03-23 19:13:54 +08:00
|
|
|
FeatureX87,
|
2019-03-21 07:35:49 +08:00
|
|
|
FeatureCMPXCHG8B,
|
2018-08-27 02:29:33 +08:00
|
|
|
FeatureCMOV,
|
2015-10-09 04:10:06 +08:00
|
|
|
FeatureMMX,
|
|
|
|
FeatureSSE41,
|
2015-10-16 14:03:09 +08:00
|
|
|
FeatureFXSR,
|
2018-01-11 06:07:16 +08:00
|
|
|
FeatureNOPL,
|
2018-08-30 14:01:05 +08:00
|
|
|
Feature64Bit,
|
2015-10-09 04:10:06 +08:00
|
|
|
FeatureCMPXCHG16B,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureLAHFSAHF
|
|
|
|
],
|
|
|
|
[
|
2019-11-05 02:20:00 +08:00
|
|
|
FeatureMacroFusion,
|
2020-07-31 08:05:06 +08:00
|
|
|
FeatureSlowUAMem16,
|
2019-11-05 02:20:00 +08:00
|
|
|
FeatureInsertVZEROUPPER
|
Move the MMX subtarget feature out of the SSE set of features and into
its own variable.
This is needed so that we can explicitly turn off MMX without turning
off SSE and also so that we can diagnose feature set incompatibilities
that involve MMX without SSE.
Rationale:
// sse3
__m128d test_mm_addsub_pd(__m128d A, __m128d B) {
return _mm_addsub_pd(A, B);
}
// mmx
void shift(__m64 a, __m64 b, int c) {
_mm_slli_pi16(a, c);
_mm_slli_pi32(a, c);
_mm_slli_si64(a, c);
_mm_srli_pi16(a, c);
_mm_srli_pi32(a, c);
_mm_srli_si64(a, c);
_mm_srai_pi16(a, c);
_mm_srai_pi32(a, c);
}
clang -msse3 -mno-mmx file.c -c
For this code we should be able to explicitly turn off MMX
without affecting the compilation of the SSE3 function and then
diagnose and error on compiling the MMX function.
This matches the existing gcc behavior and follows the spirit of
the SSE/MMX separation in llvm where we can (and do) turn off
MMX code generation except in the presence of intrinsics.
Updated a couple of tests, but primarily tested with a couple of tests
for turning on only mmx and only sse.
This is paired with a patch to clang to take advantage of this behavior.
llvm-svn: 249731
2015-10-09 04:10:06 +08:00
|
|
|
]>;
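
// A minimal sketch of how these definitions surface on the command line
// (assuming the conventional feature spellings, e.g. "sse4.1" for
// FeatureSSE41):
//   llc -mtriple=x86_64-- -mcpu=penryn file.ll        # select a CPU's feature set
//   llc -mtriple=x86_64-- -mattr=+sse4.1,-mmx file.ll # toggle single features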

// Atom CPUs.

foreach P = ["bonnell", "atom"] in {
  def : ProcModel<P, AtomModel, ProcessorFeatures.AtomFeatures,
                  ProcessorFeatures.AtomTuning>;
}
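
// Note: the foreach above just stamps out one ProcModel per name, making
// "atom" an alias of "bonnell"; it is roughly equivalent to writing:
//   def : ProcModel<"bonnell", AtomModel, ProcessorFeatures.AtomFeatures,
//                   ProcessorFeatures.AtomTuning>;
//   def : ProcModel<"atom",    AtomModel, ProcessorFeatures.AtomFeatures,
//                   ProcessorFeatures.AtomTuning>;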

foreach P = ["silvermont", "slm"] in {
  def : ProcModel<P, SLMModel, ProcessorFeatures.SLMFeatures,
                  ProcessorFeatures.SLMTuning>;
}

def : ProcModel<"goldmont", SLMModel, ProcessorFeatures.GLMFeatures,
                ProcessorFeatures.GLMTuning>;
def : ProcModel<"goldmont-plus", SLMModel, ProcessorFeatures.GLPFeatures,
                ProcessorFeatures.GLPTuning>;
def : ProcModel<"tremont", SLMModel, ProcessorFeatures.TRMFeatures,
                ProcessorFeatures.TRMTuning>;

// "Arrandale" along with corei3 and corei5
foreach P = ["nehalem", "corei7"] in {
  def : ProcModel<P, SandyBridgeModel, ProcessorFeatures.NHMFeatures,
                  ProcessorFeatures.NHMTuning>;
}

// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
def : ProcModel<"westmere", SandyBridgeModel, ProcessorFeatures.WSMFeatures,
                ProcessorFeatures.WSMTuning>;

foreach P = ["sandybridge", "corei7-avx"] in {
  def : ProcModel<P, SandyBridgeModel, ProcessorFeatures.SNBFeatures,
                  ProcessorFeatures.SNBTuning>;
}

foreach P = ["ivybridge", "core-avx-i"] in {
  def : ProcModel<P, SandyBridgeModel, ProcessorFeatures.IVBFeatures,
                  ProcessorFeatures.IVBTuning>;
}

foreach P = ["haswell", "core-avx2"] in {
  def : ProcModel<P, HaswellModel, ProcessorFeatures.HSWFeatures,
                  ProcessorFeatures.HSWTuning>;
}

def : ProcModel<"broadwell", BroadwellModel, ProcessorFeatures.BDWFeatures,
                ProcessorFeatures.BDWTuning>;

def : ProcModel<"skylake", SkylakeClientModel, ProcessorFeatures.SKLFeatures,
                ProcessorFeatures.SKLTuning>;

// FIXME: define KNL scheduler model
def : ProcModel<"knl", HaswellModel, ProcessorFeatures.KNLFeatures,
                ProcessorFeatures.KNLTuning>;
def : ProcModel<"knm", HaswellModel, ProcessorFeatures.KNMFeatures,
                ProcessorFeatures.KNLTuning>;

foreach P = ["skylake-avx512", "skx"] in {
  def : ProcModel<P, SkylakeServerModel, ProcessorFeatures.SKXFeatures,
                  ProcessorFeatures.SKXTuning>;
}

def : ProcModel<"cascadelake", SkylakeServerModel,
                ProcessorFeatures.CLXFeatures, ProcessorFeatures.CLXTuning>;
def : ProcModel<"cooperlake", SkylakeServerModel,
                ProcessorFeatures.CPXFeatures, ProcessorFeatures.CPXTuning>;
def : ProcModel<"cannonlake", SkylakeServerModel,
                ProcessorFeatures.CNLFeatures, ProcessorFeatures.CNLTuning>;
def : ProcModel<"icelake-client", SkylakeServerModel,
                ProcessorFeatures.ICLFeatures, ProcessorFeatures.ICLTuning>;
def : ProcModel<"icelake-server", SkylakeServerModel,
                ProcessorFeatures.ICXFeatures, ProcessorFeatures.ICXTuning>;
def : ProcModel<"tigerlake", SkylakeServerModel,
                ProcessorFeatures.TGLFeatures, ProcessorFeatures.TGLTuning>;
def : ProcModel<"sapphirerapids", SkylakeServerModel,
|
|
|
|
ProcessorFeatures.SPRFeatures, ProcessorFeatures.SPRTuning>;
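
// Usage sketch: these CPU names are what front ends forward to the backend,
// e.g. `clang -march=sapphirerapids` or `llc -mcpu=sapphirerapids`.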

// AMD CPUs.

def : Proc<"k6",          [FeatureX87, FeatureCMPXCHG8B, FeatureMMX],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
def : Proc<"k6-2",        [FeatureX87, FeatureCMPXCHG8B, Feature3DNow],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
def : Proc<"k6-3",        [FeatureX87, FeatureCMPXCHG8B, Feature3DNow],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;

foreach P = ["athlon", "athlon-tbird"] in {
  def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureCMOV, Feature3DNowA,
                 FeatureNOPL],
             [FeatureSlowSHLD, FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
}

foreach P = ["athlon-4", "athlon-xp", "athlon-mp"] in {
  def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureCMOV,
                 FeatureSSE1, Feature3DNowA, FeatureFXSR, FeatureNOPL],
             [FeatureSlowSHLD, FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
}

foreach P = ["k8", "opteron", "athlon64", "athlon-fx"] in {
  def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureSSE2, Feature3DNowA,
                 FeatureFXSR, FeatureNOPL, Feature64Bit, FeatureCMOV],
             [FeatureFastScalarShiftMasks, FeatureSlowSHLD, FeatureSlowUAMem16,
              FeatureInsertVZEROUPPER]>;
}

foreach P = ["k8-sse3", "opteron-sse3", "athlon64-sse3"] in {
  def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureSSE3, Feature3DNowA,
                 FeatureFXSR, FeatureNOPL, FeatureCMPXCHG16B, FeatureCMOV,
                 Feature64Bit],
             [FeatureFastScalarShiftMasks, FeatureSlowSHLD, FeatureSlowUAMem16,
              FeatureInsertVZEROUPPER]>;
}

foreach P = ["amdfam10", "barcelona"] in {
  def : Proc<P, ProcessorFeatures.BarcelonaFeatures,
             ProcessorFeatures.BarcelonaTuning>;
}

// Bobcat
def : Proc<"btver1", ProcessorFeatures.BtVer1Features,
           ProcessorFeatures.BtVer1Tuning>;
// Jaguar
def : ProcModel<"btver2", BtVer2Model, ProcessorFeatures.BtVer2Features,
                ProcessorFeatures.BtVer2Tuning>;

// Bulldozer
def : ProcModel<"bdver1", BdVer2Model, ProcessorFeatures.BdVer1Features,
                ProcessorFeatures.BdVer1Tuning>;
// Piledriver
def : ProcModel<"bdver2", BdVer2Model, ProcessorFeatures.BdVer2Features,
                ProcessorFeatures.BdVer2Tuning>;

// Steamroller
def : Proc<"bdver3", ProcessorFeatures.BdVer3Features,
           ProcessorFeatures.BdVer3Tuning>;
// Excavator
def : Proc<"bdver4", ProcessorFeatures.BdVer4Features,
           ProcessorFeatures.BdVer4Tuning>;

def : ProcModel<"znver1", Znver1Model, ProcessorFeatures.ZNFeatures,
                ProcessorFeatures.ZNTuning>;
def : ProcModel<"znver2", Znver2Model, ProcessorFeatures.ZN2Features,
                ProcessorFeatures.ZN2Tuning>;

def : Proc<"geode",       [FeatureX87, FeatureCMPXCHG8B, Feature3DNowA],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;

def : Proc<"winchip-c6",  [FeatureX87, FeatureMMX],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
def : Proc<"winchip2",    [FeatureX87, Feature3DNow],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
def : Proc<"c3",          [FeatureX87, Feature3DNow],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;
def : Proc<"c3-2",        [FeatureX87, FeatureCMPXCHG8B, FeatureMMX,
                           FeatureSSE1, FeatureFXSR, FeatureCMOV],
           [FeatureSlowUAMem16, FeatureInsertVZEROUPPER]>;

// We also provide a generic 64-bit-specific x86 processor model which tries to
// be good for modern chips without enabling instruction set encodings past the
// basic SSE2 and 64-bit ones. It disables slow things from any mainstream and
// modern 64-bit x86 chip, and enables features that are generally beneficial.
//
// We currently use the Sandy Bridge model as the default scheduling model as
// we use it across Nehalem, Westmere, Sandy Bridge, and Ivy Bridge which
// covers a huge swath of x86 processors. If there are specific scheduling
// knobs which need to be tuned differently for AMD chips, we might consider
// forming a common base for them.
def : ProcModel<"x86-64", SandyBridgeModel, ProcessorFeatures.X86_64V1Features,
[
  FeatureSlow3OpsLEA,
  FeatureSlowDivide64,
  FeatureSlowIncDec,
  FeatureMacroFusion,
  FeatureInsertVZEROUPPER
]>;
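
// A usage sketch (assuming, as is conventional, that this generic model is the
// baseline CPU for 64-bit triples):
//   clang -march=x86-64 file.c
//   llc -mtriple=x86_64-- -mcpu=x86-64 file.ll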

// x86-64 micro-architecture levels.
def : ProcModel<"x86-64-v2", SandyBridgeModel, ProcessorFeatures.X86_64V2Features,
                ProcessorFeatures.SNBTuning>;
// Close to Haswell.
def : ProcModel<"x86-64-v3", HaswellModel, ProcessorFeatures.X86_64V3Features,
                ProcessorFeatures.HSWTuning>;
// Close to the AVX-512 level implemented by Xeon Scalable Processors.
def : ProcModel<"x86-64-v4", HaswellModel, ProcessorFeatures.X86_64V4Features,
                ProcessorFeatures.SKXTuning>;
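
// Informal shorthand for what the levels mean (the psABI defines them
// precisely; treat this as a sketch): x86-64-v2 is roughly the SSE4.2/POPCNT
// class, x86-64-v3 the AVX2 class, and x86-64-v4 the AVX-512 class, selected
// with e.g. `clang -march=x86-64-v3`.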

//===----------------------------------------------------------------------===//
// Calling Conventions
//===----------------------------------------------------------------------===//

include "X86CallingConv.td"

//===----------------------------------------------------------------------===//
// Assembly Parser
//===----------------------------------------------------------------------===//

def ATTAsmParserVariant : AsmParserVariant {
  int Variant = 0;

  // Variant name.
  string Name = "att";

  // Discard comments in assembly strings.
  string CommentDelimiter = "#";

  // Recognize hard coded registers.
  string RegisterPrefix = "%";
}

def IntelAsmParserVariant : AsmParserVariant {
  int Variant = 1;

  // Variant name.
  string Name = "intel";

  // Discard comments in assembly strings.
  string CommentDelimiter = ";";

  // Recognize hard coded registers.
  string RegisterPrefix = "";
}
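
// For orientation, the same instruction in the two parsed dialects (a sketch,
// not a full description of either syntax):
//   AT&T:  movl $42, %eax     # '%' register prefix, source before destination
//   Intel: mov eax, 42        ; no register prefix, destination before source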

//===----------------------------------------------------------------------===//
// Assembly Printers
//===----------------------------------------------------------------------===//

// The X86 target supports two different syntaxes for emitting machine code.
// This is controlled by the -x86-asm-syntax={att|intel} flag.
def ATTAsmWriter : AsmWriter {
  string AsmWriterClassName = "ATTInstPrinter";
  int Variant = 0;
}
def IntelAsmWriter : AsmWriter {
  string AsmWriterClassName = "IntelInstPrinter";
  int Variant = 1;
}
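
// Usage sketch: both printers are built in, and the flag named above selects
// between them at tool run time, e.g.
//   llc -x86-asm-syntax=intel file.ll -o file.s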

def X86 : Target {
  // Information about the instructions...
  let InstructionSet = X86InstrInfo;
  let AssemblyParserVariants = [ATTAsmParserVariant, IntelAsmParserVariant];
  let AssemblyWriters = [ATTAsmWriter, IntelAsmWriter];
  let AllowRegisterRenaming = 1;
}
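
// A build-flow sketch (an assumption about the usual TableGen invocation, not
// something this file prescribes): backends such as -gen-asm-writer and
// -gen-instr-info consume the Target record above, e.g.
//   llvm-tblgen -gen-asm-writer -I llvm/include -I llvm/lib/Target/X86 \
//       llvm/lib/Target/X86/X86.td -o X86GenAsmWriter.inc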

//===----------------------------------------------------------------------===//
// Pfm Counters
//===----------------------------------------------------------------------===//

include "X86PfmCounters.td"