//===-- X86.td - Target definition file for the Intel X86 --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a target description file for the Intel i386 architecture, referred
// to here as the "X86" architecture.
//
//===----------------------------------------------------------------------===//

// Get the target-independent interfaces which we are implementing...
//
include "llvm/Target/Target.td"

//===----------------------------------------------------------------------===//
// X86 Subtarget state
//

def Mode64Bit : SubtargetFeature<"64bit-mode", "In64BitMode", "true",
                                  "64-bit mode (x86_64)">;
def Mode32Bit : SubtargetFeature<"32bit-mode", "In32BitMode", "true",
                                  "32-bit mode (80386)">;
def Mode16Bit : SubtargetFeature<"16bit-mode", "In16BitMode", "true",
                                  "16-bit mode (i8086)">;

//===----------------------------------------------------------------------===//
// X86 Subtarget features
//===----------------------------------------------------------------------===//

def FeatureCMOV    : SubtargetFeature<"cmov","HasCMov", "true",
                                      "Enable conditional move instructions">;

def FeaturePOPCNT  : SubtargetFeature<"popcnt", "HasPOPCNT", "true",
                                      "Support POPCNT instruction">;

def FeatureMMX     : SubtargetFeature<"mmx","X86SSELevel", "MMX",
                                      "Enable MMX instructions">;
def FeatureSSE1    : SubtargetFeature<"sse", "X86SSELevel", "SSE1",
                                      "Enable SSE instructions",
                                      // SSE codegen depends on cmovs, and all
                                      // SSE1+ processors support them.
                                      [FeatureMMX, FeatureCMOV]>;
def FeatureSSE2    : SubtargetFeature<"sse2", "X86SSELevel", "SSE2",
                                      "Enable SSE2 instructions",
                                      [FeatureSSE1]>;
def FeatureSSE3    : SubtargetFeature<"sse3", "X86SSELevel", "SSE3",
                                      "Enable SSE3 instructions",
                                      [FeatureSSE2]>;
def FeatureSSSE3   : SubtargetFeature<"ssse3", "X86SSELevel", "SSSE3",
                                      "Enable SSSE3 instructions",
                                      [FeatureSSE3]>;
def FeatureSSE41   : SubtargetFeature<"sse4.1", "X86SSELevel", "SSE41",
                                      "Enable SSE 4.1 instructions",
                                      [FeatureSSSE3]>;
def FeatureSSE42   : SubtargetFeature<"sse4.2", "X86SSELevel", "SSE42",
                                      "Enable SSE 4.2 instructions",
                                      [FeatureSSE41]>;
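// Note: the implied-feature lists above chain, so requesting a single SSE
// level pulls in everything below it. Illustrative invocation (assuming the
// standard llc -mattr syntax; not part of this file):
//   llc -march=x86-64 -mattr=+sse4.2 foo.ll
// enables sse4.2 and, transitively, sse4.1, ssse3, sse3, sse2, sse, mmx and
// cmov.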
def Feature3DNow   : SubtargetFeature<"3dnow", "X863DNowLevel", "ThreeDNow",
                                      "Enable 3DNow! instructions",
                                      [FeatureMMX]>;
def Feature3DNowA  : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
                                      "Enable 3DNow! Athlon instructions",
                                      [Feature3DNow]>;
// All x86-64 hardware has SSE2, but we don't mark SSE2 as an implied
// feature, because SSE2 can be disabled (e.g. for compiling OS kernels)
// without disabling 64-bit mode.
def Feature64Bit   : SubtargetFeature<"64bit", "HasX86_64", "true",
                                      "Support 64-bit instructions",
                                      [FeatureCMOV]>;
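// For example, a kernel-style build can stay in 64-bit mode while turning
// SSE2 off (illustrative invocation, not part of this file):
//   llc -march=x86-64 -mattr=-sse2 foo.ll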
def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
                                      "64-bit with cmpxchg16b",
                                      [Feature64Bit]>;
def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
                                       "Bit testing of memory is slow">;
// SHLD/SHRD are VectorPath (microcode) instructions with poor latency on
// several AMD processor families (K7, K8, K10, K12, K15 and K16). Emitting
// them is acceptable when optimizing for size, but when optimizing for speed
// those targets are better served by equivalent sequences of DirectPath
// instructions (ADD, ADC, SHR, SHL, OR, AND, LEA), which have lower latency
// and also leave decode bandwidth for a third DirectPath instruction.
// Targets carrying this feature therefore do not fold
// (or (x << c) | (y >> (64 - c))) into SHLD/SHRD unless optimizing for size.
// No equivalent recommendation is known for Intel processors, so the fold
// remains enabled for them.
def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
                                       "SHLD instruction is slow">;
def FeatureFastUAMem : SubtargetFeature<"fast-unaligned-mem",
                                        "IsUAMemFast", "true",
                                        "Fast unaligned memory access">;
def FeatureSSE4A   : SubtargetFeature<"sse4a", "HasSSE4A", "true",
                                      "Support SSE 4a instructions",
                                      [FeatureSSE3]>;

def FeatureAVX     : SubtargetFeature<"avx", "X86SSELevel", "AVX",
                                      "Enable AVX instructions",
                                      [FeatureSSE42]>;
def FeatureAVX2    : SubtargetFeature<"avx2", "X86SSELevel", "AVX2",
                                      "Enable AVX2 instructions",
                                      [FeatureAVX]>;
def FeatureAVX512  : SubtargetFeature<"avx512f", "X86SSELevel", "AVX512F",
                                      "Enable AVX-512 instructions",
                                      [FeatureAVX2]>;
def FeatureERI     : SubtargetFeature<"avx512er", "HasERI", "true",
                      "Enable AVX-512 Exponential and Reciprocal Instructions",
                                      [FeatureAVX512]>;
def FeatureCDI     : SubtargetFeature<"avx512cd", "HasCDI", "true",
                      "Enable AVX-512 Conflict Detection Instructions",
                                      [FeatureAVX512]>;
def FeaturePFI     : SubtargetFeature<"avx512pf", "HasPFI", "true",
                      "Enable AVX-512 PreFetch Instructions",
                                      [FeatureAVX512]>;
def FeatureDQI     : SubtargetFeature<"avx512dq", "HasDQI", "true",
                      "Enable AVX-512 Doubleword and Quadword Instructions",
                                      [FeatureAVX512]>;
def FeatureBWI     : SubtargetFeature<"avx512bw", "HasBWI", "true",
                      "Enable AVX-512 Byte and Word Instructions",
                                      [FeatureAVX512]>;
def FeatureVLX     : SubtargetFeature<"avx512vl", "HasVLX", "true",
                      "Enable AVX-512 Vector Length eXtensions",
                                      [FeatureAVX512]>;
def FeaturePCLMUL  : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
                      "Enable packed carry-less multiplication instructions",
                                      [FeatureSSE2]>;
def FeatureFMA     : SubtargetFeature<"fma", "HasFMA", "true",
                                      "Enable three-operand fused multiply-add",
                                      [FeatureAVX]>;
def FeatureFMA4    : SubtargetFeature<"fma4", "HasFMA4", "true",
                                      "Enable four-operand fused multiply-add",
                                      [FeatureAVX, FeatureSSE4A]>;
def FeatureXOP     : SubtargetFeature<"xop", "HasXOP", "true",
                                      "Enable XOP instructions",
                                      [FeatureFMA4]>;
def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",
                 "HasVectorUAMem", "true",
                 "Allow unaligned memory operands on vector/SIMD instructions">;
def FeatureAES     : SubtargetFeature<"aes", "HasAES", "true",
                                      "Enable AES instructions",
                                      [FeatureSSE2]>;
def FeatureTBM     : SubtargetFeature<"tbm", "HasTBM", "true",
                                      "Enable TBM instructions">;
def FeatureMOVBE   : SubtargetFeature<"movbe", "HasMOVBE", "true",
                                      "Support MOVBE instruction">;
def FeatureRDRAND  : SubtargetFeature<"rdrnd", "HasRDRAND", "true",
                                      "Support RDRAND instruction">;
def FeatureF16C    : SubtargetFeature<"f16c", "HasF16C", "true",
                      "Support 16-bit floating point conversion instructions",
                                      [FeatureAVX]>;
def FeatureFSGSBase : SubtargetFeature<"fsgsbase", "HasFSGSBase", "true",
                                       "Support FS/GS Base instructions">;
def FeatureLZCNT   : SubtargetFeature<"lzcnt", "HasLZCNT", "true",
                                      "Support LZCNT instruction">;
def FeatureBMI     : SubtargetFeature<"bmi", "HasBMI", "true",
                                      "Support BMI instructions">;
def FeatureBMI2    : SubtargetFeature<"bmi2", "HasBMI2", "true",
                                      "Support BMI2 instructions">;
def FeatureRTM     : SubtargetFeature<"rtm", "HasRTM", "true",
                                      "Support RTM instructions">;
def FeatureHLE     : SubtargetFeature<"hle", "HasHLE", "true",
                                      "Support HLE">;
def FeatureADX     : SubtargetFeature<"adx", "HasADX", "true",
                                      "Support ADX instructions">;
def FeatureSHA     : SubtargetFeature<"sha", "HasSHA", "true",
                                      "Enable SHA instructions",
                                      [FeatureSSE2]>;
def FeaturePRFCHW  : SubtargetFeature<"prfchw", "HasPRFCHW", "true",
                                      "Support PRFCHW instructions">;
def FeatureRDSEED  : SubtargetFeature<"rdseed", "HasRDSEED", "true",
                                      "Support RDSEED instruction">;
def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
                                     "Use LEA for adjusting the stack pointer">;
def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb",
                            "HasSlowDivide", "true",
                            "Use small divide for positive values less than 256">;
def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions",
                                     "PadShortFunctions", "true",
                                     "Pad short functions">;
def FeatureCallRegIndirect : SubtargetFeature<"call-reg-indirect",
                                     "CallRegIndirect", "true",
                                     "Call register indirect">;
def FeatureLEAUsesAG : SubtargetFeature<"lea-uses-ag", "LEAUsesAG", "true",
                                   "LEA instruction needs inputs at AG stage">;
def FeatureSlowLEA : SubtargetFeature<"slow-lea", "SlowLEA", "true",
                                   "LEA instruction with certain arguments is slow">;
def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true",
                                   "INC and DEC instructions are slower than ADD and SUB">;

//===----------------------------------------------------------------------===//
// X86 processors supported.
//===----------------------------------------------------------------------===//

include "X86Schedule.td"

def ProcIntelAtom : SubtargetFeature<"atom", "X86ProcFamily", "IntelAtom",
                    "Intel Atom processors">;
def ProcIntelSLM  : SubtargetFeature<"slm", "X86ProcFamily", "IntelSLM",
                    "Intel Silvermont processors">;

class Proc<string Name, list<SubtargetFeature> Features>
 : ProcessorModel<Name, GenericModel, Features>;

def : Proc<"generic",         []>;
def : Proc<"i386",            []>;
def : Proc<"i486",            []>;
def : Proc<"i586",            []>;
def : Proc<"pentium",         []>;
def : Proc<"pentium-mmx",     [FeatureMMX]>;
def : Proc<"i686",            []>;
def : Proc<"pentiumpro",      [FeatureCMOV]>;
def : Proc<"pentium2",        [FeatureMMX, FeatureCMOV]>;
def : Proc<"pentium3",        [FeatureSSE1]>;
def : Proc<"pentium3m",       [FeatureSSE1, FeatureSlowBTMem]>;
def : Proc<"pentium-m",       [FeatureSSE2, FeatureSlowBTMem]>;
def : Proc<"pentium4",        [FeatureSSE2]>;
def : Proc<"pentium4m",       [FeatureSSE2, FeatureSlowBTMem]>;

// Intel Core Duo.
def : ProcessorModel<"yonah", SandyBridgeModel,
                     [FeatureSSE3, FeatureSlowBTMem]>;

// NetBurst.
def : Proc<"prescott", [FeatureSSE3, FeatureSlowBTMem]>;
def : Proc<"nocona",   [FeatureSSE3, FeatureCMPXCHG16B, FeatureSlowBTMem]>;

// Intel Core 2 Solo/Duo.
def : ProcessorModel<"core2", SandyBridgeModel,
                     [FeatureSSSE3, FeatureCMPXCHG16B, FeatureSlowBTMem]>;
def : ProcessorModel<"penryn", SandyBridgeModel,
                     [FeatureSSE41, FeatureCMPXCHG16B, FeatureSlowBTMem]>;

// Atom.
def : ProcessorModel<"atom", AtomModel,
                     [ProcIntelAtom, FeatureSSSE3, FeatureCMPXCHG16B,
                      FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP,
                      FeatureSlowDivide,
                      FeatureCallRegIndirect,
                      FeatureLEAUsesAG,
                      FeaturePadShortFunctions]>;

// Atom Silvermont.
def : ProcessorModel<"slm", SLMModel, [ProcIntelSLM,
                      FeatureSSE42, FeatureCMPXCHG16B,
                      FeatureMOVBE, FeaturePOPCNT,
                      FeaturePCLMUL, FeatureAES,
                      FeatureCallRegIndirect,
                      FeaturePRFCHW,
                      FeatureSlowLEA, FeatureSlowIncDec,
                      FeatureSlowBTMem, FeatureFastUAMem]>;
// "Arrandale" along with corei3 and corei5
def : ProcessorModel<"corei7", SandyBridgeModel,
                     [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
                      FeatureFastUAMem, FeaturePOPCNT, FeatureAES]>;

def : ProcessorModel<"nehalem", SandyBridgeModel,
                     [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
                      FeatureFastUAMem, FeaturePOPCNT]>;
// Westmere is a similar machine to nehalem with some additional features.
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
def : ProcessorModel<"westmere", SandyBridgeModel,
                     [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
                      FeatureFastUAMem, FeaturePOPCNT, FeatureAES,
                      FeaturePCLMUL]>;
// Sandy Bridge
// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
// rather than a superset.
def : ProcessorModel<"corei7-avx", SandyBridgeModel,
                     [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem,
                      FeaturePOPCNT, FeatureAES, FeaturePCLMUL]>;
// Ivy Bridge
def : ProcessorModel<"core-avx-i", SandyBridgeModel,
                     [FeatureAVX, FeatureCMPXCHG16B, FeatureFastUAMem,
                      FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND,
                      FeatureF16C, FeatureFSGSBase]>;

// Haswell
def : ProcessorModel<"core-avx2", HaswellModel,
                     [FeatureAVX2, FeatureCMPXCHG16B, FeatureFastUAMem,
                      FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND,
                      FeatureF16C, FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT,
                      FeatureBMI, FeatureBMI2, FeatureFMA, FeatureRTM,
                      FeatureHLE]>;

// KNL
// FIXME: define KNL model
def : ProcessorModel<"knl", HaswellModel,
                     [FeatureAVX512, FeatureERI, FeatureCDI, FeaturePFI,
                      FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT,
                      FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
                      FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
                      FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
                      FeatureSlowIncDec]>;

// SKX
// FIXME: define SKX model
def : ProcessorModel<"skx", HaswellModel,
                     [FeatureAVX512, FeatureCDI,
                      FeatureDQI, FeatureBWI, FeatureVLX,
                      FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT,
                      FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
                      FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
                      FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
                      FeatureSlowIncDec]>;

def : Proc<"k6",              [FeatureMMX]>;
def : Proc<"k6-2",            [Feature3DNow]>;
def : Proc<"k6-3",            [Feature3DNow]>;
def : Proc<"athlon",          [Feature3DNowA, FeatureSlowBTMem,
                               FeatureSlowSHLD]>;
def : Proc<"athlon-tbird",    [Feature3DNowA, FeatureSlowBTMem,
                               FeatureSlowSHLD]>;
def : Proc<"athlon-4",        [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
                               FeatureSlowSHLD]>;
def : Proc<"athlon-xp",       [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
                               FeatureSlowSHLD]>;
def : Proc<"athlon-mp",       [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
                               FeatureSlowSHLD]>;
def : Proc<"k8",              [FeatureSSE2, Feature3DNowA, Feature64Bit,
                               FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"opteron",         [FeatureSSE2, Feature3DNowA, Feature64Bit,
                               FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"athlon64",        [FeatureSSE2, Feature3DNowA, Feature64Bit,
                               FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"athlon-fx",       [FeatureSSE2, Feature3DNowA, Feature64Bit,
                               FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"k8-sse3",         [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
                               FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"opteron-sse3",    [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
                               FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"athlon64-sse3",   [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
                               FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"amdfam10",        [FeatureSSE4A,
                               Feature3DNowA, FeatureCMPXCHG16B, FeatureLZCNT,
                               FeaturePOPCNT, FeatureSlowBTMem,
                               FeatureSlowSHLD]>;
// Bobcat
def : Proc<"btver1",          [FeatureSSSE3, FeatureSSE4A, FeatureCMPXCHG16B,
                               FeaturePRFCHW, FeatureLZCNT, FeaturePOPCNT,
                               FeatureSlowSHLD]>;
// Jaguar
def : Proc<"btver2",          [FeatureAVX, FeatureSSE4A, FeatureCMPXCHG16B,
                               FeaturePRFCHW, FeatureAES, FeaturePCLMUL,
                               FeatureBMI, FeatureF16C, FeatureMOVBE,
                               FeatureLZCNT, FeaturePOPCNT, FeatureSlowSHLD]>;
// Bulldozer
def : Proc<"bdver1",          [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
                               FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
                               FeatureLZCNT, FeaturePOPCNT, FeatureSlowSHLD]>;
// Piledriver
def : Proc<"bdver2",          [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
                               FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
                               FeatureF16C, FeatureLZCNT,
                               FeaturePOPCNT, FeatureBMI, FeatureTBM,
                               FeatureFMA, FeatureSlowSHLD]>;

// Steamroller
def : Proc<"bdver3",          [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
                               FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
                               FeatureF16C, FeatureLZCNT,
                               FeaturePOPCNT, FeatureBMI, FeatureTBM,
                               FeatureFMA, FeatureFSGSBase]>;

// Excavator
def : Proc<"bdver4",          [FeatureAVX2, FeatureXOP, FeatureFMA4,
                               FeatureCMPXCHG16B, FeatureAES, FeaturePRFCHW,
                               FeaturePCLMUL, FeatureF16C, FeatureLZCNT,
                               FeaturePOPCNT, FeatureBMI, FeatureBMI2,
                               FeatureTBM, FeatureFMA, FeatureFSGSBase]>;

def : Proc<"geode",           [Feature3DNowA]>;

def : Proc<"winchip-c6",      [FeatureMMX]>;
def : Proc<"winchip2",        [Feature3DNow]>;
def : Proc<"c3",              [Feature3DNow]>;
def : Proc<"c3-2",            [FeatureSSE1]>;

// We also provide a generic 64-bit specific x86 processor model which tries to
// be good for modern chips without enabling instruction set encodings past the
// basic SSE2 and 64-bit ones. It disables slow things from any mainstream and
// modern 64-bit x86 chip, and enables features that are generally beneficial.
//
// We currently use the Sandy Bridge model as the default scheduling model as
// we use it across Nehalem, Westmere, Sandy Bridge, and Ivy Bridge which
// covers a huge swath of x86 processors. If there are specific scheduling
// knobs which need to be tuned differently for AMD chips, we might consider
// forming a common base for them.
def : ProcessorModel<"x86-64", SandyBridgeModel,
                     [FeatureSSE2, Feature64Bit, FeatureSlowBTMem,
                      FeatureFastUAMem]>;
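// This generic definition is the one selected by an invocation such as the
// following (illustrative; driver-level flags are outside this file):
//   llc -mcpu=x86-64 foo.ll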

//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//

include "X86RegisterInfo.td"

//===----------------------------------------------------------------------===//
// Instruction Descriptions
//===----------------------------------------------------------------------===//

include "X86InstrInfo.td"

def X86InstrInfo : InstrInfo;

//===----------------------------------------------------------------------===//
// Calling Conventions
//===----------------------------------------------------------------------===//

include "X86CallingConv.td"


//===----------------------------------------------------------------------===//
// Assembly Parser
//===----------------------------------------------------------------------===//

def ATTAsmParser : AsmParser {
  string AsmParserClassName = "AsmParser";
}

def ATTAsmParserVariant : AsmParserVariant {
  int Variant = 0;

  // Variant name.
  string Name = "att";

  // Discard comments in assembly strings.
  string CommentDelimiter = "#";

  // Recognize hard coded registers.
  string RegisterPrefix = "%";
}

def IntelAsmParserVariant : AsmParserVariant {
  int Variant = 1;

  // Variant name.
  string Name = "intel";

  // Discard comments in assembly strings.
  string CommentDelimiter = ";";

  // Recognize hard coded registers.
  string RegisterPrefix = "";
}
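// For reference, the same instruction in the two variants defined above
// (illustrative only):
//   AT&T:   movq %rsp, %rbp    # '%'-prefixed registers, '#' comments
//   Intel:  mov rbp, rsp       ; bare register names, ';' comments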

//===----------------------------------------------------------------------===//
// Assembly Printers
//===----------------------------------------------------------------------===//

// The X86 target supports two different syntaxes for emitting machine code.
// This is controlled by the -x86-asm-syntax={att|intel} flag.
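// For example (illustrative invocation):
//   llc -x86-asm-syntax=intel foo.ll -o foo.s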
def ATTAsmWriter : AsmWriter {
  string AsmWriterClassName  = "ATTInstPrinter";
  int Variant = 0;
}
def IntelAsmWriter : AsmWriter {
  string AsmWriterClassName  = "IntelInstPrinter";
  int Variant = 1;
}

def X86 : Target {
  // Information about the instructions...
  let InstructionSet = X86InstrInfo;
  let AssemblyParsers = [ATTAsmParser];
  let AssemblyParserVariants = [ATTAsmParserVariant, IntelAsmParserVariant];
  let AssemblyWriters = [ATTAsmWriter, IntelAsmWriter];
}