[X86] Add command line switches for xsave/xsaveopt/xsavec/xsaves, macro defines for the same, and add the flags to the appropriate CPUs.

llvm-svn: 250368
Craig Topper 2015-10-15 05:23:38 +00:00
parent 8c9526400e
commit da9fe56bf6
5 changed files with 126 additions and 4 deletions
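As a hedged illustration (not part of the diff itself): the -mxsave/-mxsaveopt/-mxsavec/-mxsaves switches added in Options.td below enable the corresponding target features, and the __XSAVE__/__XSAVEOPT__/__XSAVEC__/__XSAVES__ macros added in getTargetDefines let user code key off them. The function names in this sketch are hypothetical placeholders; only the switches and macros come from the patch.

/* Sketch only: pick an XSAVE variant at compile time via the new macros.   */
/* Built with something like: clang -target i386-unknown-unknown -mxsavec -c state.c */
#if defined(__XSAVEC__)
void save_state_compacted(void);   /* hypothetical XSAVEC-based path */
#define SAVE_STATE save_state_compacted
#elif defined(__XSAVE__)
void save_state_plain(void);       /* hypothetical plain-XSAVE path */
#define SAVE_STATE save_state_plain
#else
void save_state_legacy(void);      /* hypothetical fallback, e.g. FXSAVE */
#define SAVE_STATE save_state_legacy
#endif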

@ -1321,6 +1321,10 @@ def mno_prfchw : Flag<["-"], "mno-prfchw">, Group<m_x86_Features_Group>;
def mno_rdseed : Flag<["-"], "mno-rdseed">, Group<m_x86_Features_Group>;
def mno_adx : Flag<["-"], "mno-adx">, Group<m_x86_Features_Group>;
def mno_sha : Flag<["-"], "mno-sha">, Group<m_x86_Features_Group>;
def mno_xsave : Flag<["-"], "mno-xsave">, Group<m_x86_Features_Group>;
def mno_xsaveopt : Flag<["-"], "mno-xsaveopt">, Group<m_x86_Features_Group>;
def mno_xsavec : Flag<["-"], "mno-xsavec">, Group<m_x86_Features_Group>;
def mno_xsaves : Flag<["-"], "mno-xsaves">, Group<m_x86_Features_Group>;
def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_arm_Features_Group>,
HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64 only)">;
@ -1468,6 +1472,10 @@ def mrdseed : Flag<["-"], "mrdseed">, Group<m_x86_Features_Group>;
def madx : Flag<["-"], "madx">, Group<m_x86_Features_Group>;
def msha : Flag<["-"], "msha">, Group<m_x86_Features_Group>;
def mcx16 : Flag<["-"], "mcx16">, Group<m_x86_Features_Group>;
def mxsave : Flag<["-"], "mxsave">, Group<m_x86_Features_Group>;
def mxsaveopt : Flag<["-"], "mxsaveopt">, Group<m_x86_Features_Group>;
def mxsavec : Flag<["-"], "mxsavec">, Group<m_x86_Features_Group>;
def mxsaves : Flag<["-"], "mxsaves">, Group<m_x86_Features_Group>;
def mips16 : Flag<["-"], "mips16">, Group<m_Group>;
def mno_mips16 : Flag<["-"], "mno-mips16">, Group<m_Group>;
def mmicromips : Flag<["-"], "mmicromips">, Group<m_Group>;


@ -2094,6 +2094,10 @@ class X86TargetInfo : public TargetInfo {
bool HasAVX512VL = false;
bool HasSHA = false;
bool HasCX16 = false;
bool HasXSAVE = false;
bool HasXSAVEOPT = false;
bool HasXSAVEC = false;
bool HasXSAVES = false;
/// \brief Enumeration of all of the X86 CPUs supported by Clang.
///
@ -2581,6 +2585,8 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512dq", true);
setFeatureEnabledImpl(Features, "avx512bw", true);
setFeatureEnabledImpl(Features, "avx512vl", true);
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
// FALLTHROUGH
case CK_Broadwell:
setFeatureEnabledImpl(Features, "rdseed", true);
@ -2601,6 +2607,8 @@ bool X86TargetInfo::initFeatureMap(
// FALLTHROUGH
case CK_SandyBridge:
setFeatureEnabledImpl(Features, "avx", true);
setFeatureEnabledImpl(Features, "xsave", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
// FALLTHROUGH
case CK_Westmere:
case CK_Silvermont:
@ -2629,6 +2637,8 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "cx16", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
setFeatureEnabledImpl(Features, "xsave", true);
break;
case CK_K6_2:
case CK_K6_3:
@ -2671,6 +2681,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
// FALLTHROUGH
case CK_BTVER1:
setFeatureEnabledImpl(Features, "ssse3", true);
@ -2679,6 +2690,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "popcnt", true);
setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "cx16", true);
setFeatureEnabledImpl(Features, "xsave", true);
break;
case CK_BDVER4:
setFeatureEnabledImpl(Features, "avx2", true);
@ -2686,6 +2698,7 @@ bool X86TargetInfo::initFeatureMap(
// FALLTHROUGH
case CK_BDVER3:
setFeatureEnabledImpl(Features, "fsgsbase", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
// FALLTHROUGH
case CK_BDVER2:
setFeatureEnabledImpl(Features, "bmi", true);
@ -2701,6 +2714,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "cx16", true);
setFeatureEnabledImpl(Features, "xsave", true);
break;
}
if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec))
@ -2744,6 +2758,7 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
Features["avx2"] = true;
case AVX:
Features["avx"] = true;
Features["xsave"] = true;
case SSE42:
Features["sse4.2"] = true;
case SSE41:
@ -2779,7 +2794,8 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
case SSE42:
Features["sse4.2"] = false;
case AVX:
Features["fma"] = Features["avx"] = Features["f16c"] = false;
Features["fma"] = Features["avx"] = Features["f16c"] = Features["xsave"] =
Features["xsaveopt"] = false;
setXOPLevel(Features, FMA4, false);
case AVX2:
Features["avx2"] = false;
@ -2912,6 +2928,16 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
setSSELevel(Features, SSE42, Enabled);
else
setSSELevel(Features, SSE41, Enabled);
} else if (Name == "xsave") {
if (Enabled)
setSSELevel(Features, AVX, Enabled);
else
Features["xsaveopt"] = false;
} else if (Name == "xsaveopt" || Name == "xsavec" || Name == "xsaves") {
if (Enabled) {
Features["xsave"] = true;
setSSELevel(Features, AVX, Enabled);
}
}
}
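A hedged sketch of the dependency rules just added: enabling xsaveopt, xsavec, or xsaves also turns on the base xsave feature, while disabling xsave also drops xsaveopt. Expressed as preprocessor expectations (illustrative only, not part of the commit; the probe declaration just keeps the translation unit non-empty):

/* Illustrative only; mirrors the feature implications above.            */
/* clang -march=atom -mxsaves              -> __XSAVES__ and __XSAVE__   */
/* clang -march=atom -mxsaveopt -mno-xsave -> neither macro is defined   */
#if defined(__XSAVES__) && !defined(__XSAVE__)
#error "xsaves is expected to imply xsave"
#endif
#if defined(__XSAVEOPT__) && !defined(__XSAVE__)
#error "xsaveopt is expected to imply xsave"
#endif
extern int xsave_feature_probe;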
@ -2969,6 +2995,14 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasSHA = true;
} else if (Feature == "+cx16") {
HasCX16 = true;
} else if (Feature == "+xsave") {
HasXSAVE = true;
} else if (Feature == "+xsaveopt") {
HasXSAVEOPT = true;
} else if (Feature == "+xsavec") {
HasXSAVEC = true;
} else if (Feature == "+xsaves") {
HasXSAVES = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@ -3261,6 +3295,15 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasSHA)
Builder.defineMacro("__SHA__");
if (HasXSAVE)
Builder.defineMacro("__XSAVE__");
if (HasXSAVEOPT)
Builder.defineMacro("__XSAVEOPT__");
if (HasXSAVEC)
Builder.defineMacro("__XSAVEC__");
if (HasXSAVES)
Builder.defineMacro("__XSAVES__");
if (HasCX16)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
@ -3373,6 +3416,10 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
.Case("xop", XOPLevel >= XOP)
.Case("xsave", HasXSAVE)
.Case("xsavec", HasXSAVEC)
.Case("xsaves", HasXSAVES)
.Case("xsaveopt", HasXSAVEOPT)
.Default(false);
}


@ -32,8 +32,8 @@ int __attribute__((target("no-mmx"))) qq(int a) { return 40; }
// CHECK: qax{{.*}} #4
// CHECK: qq{{.*}} #5
// CHECK: #0 = {{.*}}"target-cpu"="x86-64" "target-features"="+mmx,+sse,+sse2"
// CHECK: #1 = {{.*}}"target-cpu"="ivybridge" "target-features"="+aes,+avx,+cx16,+f16c,+fsgsbase,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3"
// CHECK: #2 = {{.*}}"target-cpu"="x86-64" "target-features"="+mmx,+sse,-aes,-avx,-avx2,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512pf,-avx512vl,-f16c,-fma,-fma4,-pclmul,-sha,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-xop"
// CHECK: #1 = {{.*}}"target-cpu"="ivybridge" "target-features"="+aes,+avx,+cx16,+f16c,+fsgsbase,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+xsave,+xsaveopt"
// CHECK: #2 = {{.*}}"target-cpu"="x86-64" "target-features"="+mmx,+sse,-aes,-avx,-avx2,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512pf,-avx512vl,-f16c,-fma,-fma4,-pclmul,-sha,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-xop,-xsave,-xsaveopt"
// CHECK: #3 = {{.*}}"target-cpu"="x86-64" "target-features"="+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3"
// CHECK: #4 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+cx16,+f16c,+fsgsbase,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,-aes"
// CHECK: #4 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+cx16,+f16c,+fsgsbase,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+xsave,+xsaveopt,-aes"
// CHECK: #5 = {{.*}}"target-cpu"="x86-64" "target-features"="+sse,+sse2,-3dnow,-3dnowa,-mmx"


@ -425,6 +425,8 @@
// CHECK_COREI7_AVX_M32: #define __SSE4_2__ 1
// CHECK_COREI7_AVX_M32: #define __SSE__ 1
// CHECK_COREI7_AVX_M32: #define __SSSE3__ 1
// CHECK_COREI7_AVX_M32: #define __XSAVEOPT__ 1
// CHECK_COREI7_AVX_M32: #define __XSAVE__ 1
// CHECK_COREI7_AVX_M32: #define __corei7 1
// CHECK_COREI7_AVX_M32: #define __corei7__ 1
// CHECK_COREI7_AVX_M32: #define __i386 1
@ -448,6 +450,8 @@
// CHECK_COREI7_AVX_M64: #define __SSE_MATH__ 1
// CHECK_COREI7_AVX_M64: #define __SSE__ 1
// CHECK_COREI7_AVX_M64: #define __SSSE3__ 1
// CHECK_COREI7_AVX_M64: #define __XSAVEOPT__ 1
// CHECK_COREI7_AVX_M64: #define __XSAVE__ 1
// CHECK_COREI7_AVX_M64: #define __amd64 1
// CHECK_COREI7_AVX_M64: #define __amd64__ 1
// CHECK_COREI7_AVX_M64: #define __corei7 1
@ -471,6 +475,8 @@
// CHECK_CORE_AVX_I_M32: #define __SSE4_2__ 1
// CHECK_CORE_AVX_I_M32: #define __SSE__ 1
// CHECK_CORE_AVX_I_M32: #define __SSSE3__ 1
// CHECK_CORE_AVX_I_M32: #define __XSAVEOPT__ 1
// CHECK_CORE_AVX_I_M32: #define __XSAVE__ 1
// CHECK_CORE_AVX_I_M32: #define __corei7 1
// CHECK_CORE_AVX_I_M32: #define __corei7__ 1
// CHECK_CORE_AVX_I_M32: #define __i386 1
@ -494,6 +500,8 @@
// CHECK_CORE_AVX_I_M64: #define __SSE_MATH__ 1
// CHECK_CORE_AVX_I_M64: #define __SSE__ 1
// CHECK_CORE_AVX_I_M64: #define __SSSE3__ 1
// CHECK_CORE_AVX_I_M64: #define __XSAVEOPT__ 1
// CHECK_CORE_AVX_I_M64: #define __XSAVE__ 1
// CHECK_CORE_AVX_I_M64: #define __amd64 1
// CHECK_CORE_AVX_I_M64: #define __amd64__ 1
// CHECK_CORE_AVX_I_M64: #define __corei7 1
@ -524,6 +532,8 @@
// CHECK_CORE_AVX2_M32: #define __SSE4_2__ 1
// CHECK_CORE_AVX2_M32: #define __SSE__ 1
// CHECK_CORE_AVX2_M32: #define __SSSE3__ 1
// CHECK_CORE_AVX2_M32: #define __XSAVEOPT__ 1
// CHECK_CORE_AVX2_M32: #define __XSAVE__ 1
// CHECK_CORE_AVX2_M32: #define __corei7 1
// CHECK_CORE_AVX2_M32: #define __corei7__ 1
// CHECK_CORE_AVX2_M32: #define __i386 1
@ -554,6 +564,8 @@
// CHECK_CORE_AVX2_M64: #define __SSE_MATH__ 1
// CHECK_CORE_AVX2_M64: #define __SSE__ 1
// CHECK_CORE_AVX2_M64: #define __SSSE3__ 1
// CHECK_CORE_AVX2_M64: #define __XSAVEOPT__ 1
// CHECK_CORE_AVX2_M64: #define __XSAVE__ 1
// CHECK_CORE_AVX2_M64: #define __amd64 1
// CHECK_CORE_AVX2_M64: #define __amd64__ 1
// CHECK_CORE_AVX2_M64: #define __corei7 1
@ -586,6 +598,8 @@
// CHECK_BROADWELL_M32: #define __SSE4_2__ 1
// CHECK_BROADWELL_M32: #define __SSE__ 1
// CHECK_BROADWELL_M32: #define __SSSE3__ 1
// CHECK_BROADWELL_M32: #define __XSAVEOPT__ 1
// CHECK_BROADWELL_M32: #define __XSAVE__ 1
// CHECK_BROADWELL_M32: #define __corei7 1
// CHECK_BROADWELL_M32: #define __corei7__ 1
// CHECK_BROADWELL_M32: #define __i386 1
@ -618,6 +632,8 @@
// CHECK_BROADWELL_M64: #define __SSE_MATH__ 1
// CHECK_BROADWELL_M64: #define __SSE__ 1
// CHECK_BROADWELL_M64: #define __SSSE3__ 1
// CHECK_BROADWELL_M64: #define __XSAVEOPT__ 1
// CHECK_BROADWELL_M64: #define __XSAVE__ 1
// CHECK_BROADWELL_M64: #define __amd64 1
// CHECK_BROADWELL_M64: #define __amd64__ 1
// CHECK_BROADWELL_M64: #define __corei7 1
@ -652,6 +668,8 @@
// CHECK_KNL_M32: #define __SSE4_2__ 1
// CHECK_KNL_M32: #define __SSE__ 1
// CHECK_KNL_M32: #define __SSSE3__ 1
// CHECK_KNL_M32: #define __XSAVEOPT__ 1
// CHECK_KNL_M32: #define __XSAVE__ 1
// CHECK_KNL_M32: #define __i386 1
// CHECK_KNL_M32: #define __i386__ 1
// CHECK_KNL_M32: #define __knl 1
@ -687,6 +705,8 @@
// CHECK_KNL_M64: #define __SSE_MATH__ 1
// CHECK_KNL_M64: #define __SSE__ 1
// CHECK_KNL_M64: #define __SSSE3__ 1
// CHECK_KNL_M64: #define __XSAVEOPT__ 1
// CHECK_KNL_M64: #define __XSAVE__ 1
// CHECK_KNL_M64: #define __amd64 1
// CHECK_KNL_M64: #define __amd64__ 1
// CHECK_KNL_M64: #define __knl 1
@ -722,6 +742,10 @@
// CHECK_SKX_M32: #define __SSE4_2__ 1
// CHECK_SKX_M32: #define __SSE__ 1
// CHECK_SKX_M32: #define __SSSE3__ 1
// CHECK_SKX_M32: #define __XSAVEC__ 1
// CHECK_SKX_M32: #define __XSAVEOPT__ 1
// CHECK_SKX_M32: #define __XSAVES__ 1
// CHECK_SKX_M32: #define __XSAVE__ 1
// CHECK_SKX_M32: #define __i386 1
// CHECK_SKX_M32: #define __i386__ 1
// CHECK_SKX_M32: #define __skx 1
@ -758,6 +782,10 @@
// CHECK_SKX_M64: #define __SSE_MATH__ 1
// CHECK_SKX_M64: #define __SSE__ 1
// CHECK_SKX_M64: #define __SSSE3__ 1
// CHECK_SKX_M64: #define __XSAVEC__ 1
// CHECK_SKX_M64: #define __XSAVEOPT__ 1
// CHECK_SKX_M64: #define __XSAVES__ 1
// CHECK_SKX_M64: #define __XSAVE__ 1
// CHECK_SKX_M64: #define __amd64 1
// CHECK_SKX_M64: #define __amd64__ 1
// CHECK_SKX_M64: #define __skx 1
@ -1307,6 +1335,7 @@
// CHECK_BTVER1_M32: #define __SSE_MATH__ 1
// CHECK_BTVER1_M32: #define __SSE__ 1
// CHECK_BTVER1_M32: #define __SSSE3__ 1
// CHECK_BTVER1_M32: #define __XSAVE__ 1
// CHECK_BTVER1_M32: #define __btver1 1
// CHECK_BTVER1_M32: #define __btver1__ 1
// CHECK_BTVER1_M32: #define __i386 1
@ -1328,6 +1357,7 @@
// CHECK_BTVER1_M64: #define __SSE_MATH__ 1
// CHECK_BTVER1_M64: #define __SSE__ 1
// CHECK_BTVER1_M64: #define __SSSE3__ 1
// CHECK_BTVER1_M64: #define __XSAVE__ 1
// CHECK_BTVER1_M64: #define __amd64 1
// CHECK_BTVER1_M64: #define __amd64__ 1
// CHECK_BTVER1_M64: #define __btver1 1
@ -1356,6 +1386,8 @@
// CHECK_BTVER2_M32: #define __SSE_MATH__ 1
// CHECK_BTVER2_M32: #define __SSE__ 1
// CHECK_BTVER2_M32: #define __SSSE3__ 1
// CHECK_BTVER2_M32: #define __XSAVEOPT__ 1
// CHECK_BTVER2_M32: #define __XSAVE__ 1
// CHECK_BTVER2_M32: #define __btver2 1
// CHECK_BTVER2_M32: #define __btver2__ 1
// CHECK_BTVER2_M32: #define __i386 1
@ -1382,6 +1414,8 @@
// CHECK_BTVER2_M64: #define __SSE_MATH__ 1
// CHECK_BTVER2_M64: #define __SSE__ 1
// CHECK_BTVER2_M64: #define __SSSE3__ 1
// CHECK_BTVER2_M64: #define __XSAVEOPT__ 1
// CHECK_BTVER2_M64: #define __XSAVE__ 1
// CHECK_BTVER2_M64: #define __amd64 1
// CHECK_BTVER2_M64: #define __amd64__ 1
// CHECK_BTVER2_M64: #define __btver2 1
@ -1412,6 +1446,7 @@
// CHECK_BDVER1_M32: #define __SSE__ 1
// CHECK_BDVER1_M32: #define __SSSE3__ 1
// CHECK_BDVER1_M32: #define __XOP__ 1
// CHECK_BDVER1_M32: #define __XSAVE__ 1
// CHECK_BDVER1_M32: #define __bdver1 1
// CHECK_BDVER1_M32: #define __bdver1__ 1
// CHECK_BDVER1_M32: #define __i386 1
@ -1440,6 +1475,7 @@
// CHECK_BDVER1_M64: #define __SSE__ 1
// CHECK_BDVER1_M64: #define __SSSE3__ 1
// CHECK_BDVER1_M64: #define __XOP__ 1
// CHECK_BDVER1_M64: #define __XSAVE__ 1
// CHECK_BDVER1_M64: #define __amd64 1
// CHECK_BDVER1_M64: #define __amd64__ 1
// CHECK_BDVER1_M64: #define __bdver1 1
@ -1474,6 +1510,7 @@
// CHECK_BDVER2_M32: #define __SSSE3__ 1
// CHECK_BDVER2_M32: #define __TBM__ 1
// CHECK_BDVER2_M32: #define __XOP__ 1
// CHECK_BDVER2_M32: #define __XSAVE__ 1
// CHECK_BDVER2_M32: #define __bdver2 1
// CHECK_BDVER2_M32: #define __bdver2__ 1
// CHECK_BDVER2_M32: #define __i386 1
@ -1506,6 +1543,7 @@
// CHECK_BDVER2_M64: #define __SSSE3__ 1
// CHECK_BDVER2_M64: #define __TBM__ 1
// CHECK_BDVER2_M64: #define __XOP__ 1
// CHECK_BDVER2_M64: #define __XSAVE__ 1
// CHECK_BDVER2_M64: #define __amd64 1
// CHECK_BDVER2_M64: #define __amd64__ 1
// CHECK_BDVER2_M64: #define __bdver2 1
@ -1541,6 +1579,8 @@
// CHECK_BDVER3_M32: #define __SSSE3__ 1
// CHECK_BDVER3_M32: #define __TBM__ 1
// CHECK_BDVER3_M32: #define __XOP__ 1
// CHECK_BDVER3_M32: #define __XSAVEOPT__ 1
// CHECK_BDVER3_M32: #define __XSAVE__ 1
// CHECK_BDVER3_M32: #define __bdver3 1
// CHECK_BDVER3_M32: #define __bdver3__ 1
// CHECK_BDVER3_M32: #define __i386 1
@ -1574,6 +1614,8 @@
// CHECK_BDVER3_M64: #define __SSSE3__ 1
// CHECK_BDVER3_M64: #define __TBM__ 1
// CHECK_BDVER3_M64: #define __XOP__ 1
// CHECK_BDVER3_M64: #define __XSAVEOPT__ 1
// CHECK_BDVER3_M64: #define __XSAVE__ 1
// CHECK_BDVER3_M64: #define __amd64 1
// CHECK_BDVER3_M64: #define __amd64__ 1
// CHECK_BDVER3_M64: #define __bdver3 1
@ -1611,6 +1653,7 @@
// CHECK_BDVER4_M32: #define __SSSE3__ 1
// CHECK_BDVER4_M32: #define __TBM__ 1
// CHECK_BDVER4_M32: #define __XOP__ 1
// CHECK_BDVER4_M32: #define __XSAVE__ 1
// CHECK_BDVER4_M32: #define __bdver4 1
// CHECK_BDVER4_M32: #define __bdver4__ 1
// CHECK_BDVER4_M32: #define __i386 1
@ -1646,6 +1689,7 @@
// CHECK_BDVER4_M64: #define __SSSE3__ 1
// CHECK_BDVER4_M64: #define __TBM__ 1
// CHECK_BDVER4_M64: #define __XOP__ 1
// CHECK_BDVER4_M64: #define __XSAVE__ 1
// CHECK_BDVER4_M64: #define __amd64 1
// CHECK_BDVER4_M64: #define __amd64__ 1
// CHECK_BDVER4_M64: #define __bdver4 1


@ -293,3 +293,26 @@
// RDSEED: #define __RDSEED__ 1
// RUN: %clang -target i386-unknown-unknown -march=atom -mxsave -x c -E -dM -o - %s | FileCheck --check-prefix=XSAVE %s
// XSAVE: #define __XSAVE__ 1
// RUN: %clang -target i386-unknown-unknown -march=atom -mxsaveopt -x c -E -dM -o - %s | FileCheck --check-prefix=XSAVEOPT %s
// XSAVEOPT: #define __XSAVEOPT__ 1
// XSAVEOPT: #define __XSAVE__ 1
// RUN: %clang -target i386-unknown-unknown -march=atom -mxsavec -x c -E -dM -o - %s | FileCheck --check-prefix=XSAVEC %s
// XSAVEC: #define __XSAVEC__ 1
// XSAVEC: #define __XSAVE__ 1
// RUN: %clang -target i386-unknown-unknown -march=atom -mxsaves -x c -E -dM -o - %s | FileCheck --check-prefix=XSAVES %s
// XSAVES: #define __XSAVES__ 1
// XSAVES: #define __XSAVE__ 1
// RUN: %clang -target i386-unknown-unknown -march=atom -mxsaveopt -mno-xsave -x c -E -dM -o - %s | FileCheck --check-prefix=NOXSAVE %s
// NOXSAVE-NOT: #define __XSAVEOPT__ 1
// NOXSAVE-NOT: #define __XSAVE__ 1
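As a further hedged sketch in the same RUN/CHECK style (illustrative only, not part of this commit): the -mno-xsaveopt switch added in Options.td disables only the xsaveopt feature, so given the implication logic above one would expect roughly:

// RUN: %clang -target i386-unknown-unknown -march=atom -mxsave -mno-xsaveopt -x c -E -dM -o - %s | FileCheck --check-prefix=NOXSAVEOPT %s
// NOXSAVEOPT-NOT: #define __XSAVEOPT__ 1
// NOXSAVEOPT: #define __XSAVE__ 1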