[X86] Remove support for Y0 constraint as an alias for Yz in inline assembly.

Neither gcc nor icc supports this. Split out from D79472. I want
to remove more, but it looks like icc supports some things that
gcc doesn't, and I need to double-check our internal test suites.
Craig Topper 2020-05-06 14:53:10 -07:00
parent 7c6420e431
commit 16c800b8b7
4 changed files with 1 addition and 33 deletions
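For context, and not part of the commit itself: a minimal sketch of how the surviving "Yz" constraint is used from C. "Yz" asks for the first SSE register (xmm0); before this change clang also accepted "Y0" as an alias for the same thing, a spelling neither gcc nor icc recognizes. The function and variable names below are made up for illustration, and the snippet assumes an x86-64 target with SSE2.

#include <immintrin.h>

__m128i add_in_xmm0(__m128i a, __m128i b) {
  __m128i r;
  /* "=Yz" pins the result to xmm0; "0" ties the first input to that same
     register, and "x" lets the second input live in any SSE register. */
  __asm__("paddq %2, %0" : "=Yz"(r) : "0"(a), "x"(b));
  return r;
}

Code that previously spelled the output constraint "=Y0", as in the f_Y0 tests deleted in the diffs below, now has to use "=Yz".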

View File

@@ -1691,8 +1691,7 @@ bool X86TargetInfo::validateAsmConstraint(
switch (*Name) {
default:
return false;
- case 'z':
- case '0': // First SSE register.
+ case 'z': // First SSE register.
case '2':
case 't': // Any SSE register, when SSE2 is enabled.
case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
@@ -1897,7 +1896,6 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
case 'k':
return Size <= 64;
case 'z':
- case '0':
// XMM0/YMM/ZMM0
if (FeatureMap.lookup("avx512f"))
// ZMM0 can be used if target supports AVX512F.
@@ -1971,7 +1969,6 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
case 'i':
case 't':
case 'z':
- case '0':
case '2':
// "^" hints llvm that this is a 2 letter constraint.
// "Constraint++" is used to promote the string iterator

View File

@@ -54,15 +54,3 @@ void f_Yz(__m128 x, __m128 y, __m128 z)
:"+Yi"(z),"=Yz" (x)
:"Yi" (y) );
}
- // CHECK-LABEL: f_Y0
- void f_Y0(__m128 x, __m128 y, __m128 z)
- {
- // CHECK: vpaddq
- // CHECK-SAME: "=^Yi,=^Y0,^Yi,0,~{dirflag},~{fpsr},~{flags}"
- __asm__ volatile ("vpaddq %0,%2,%1\n\t"
- "vpaddq %1,%0,%2\n\t"
- :"+Yi"(z),"=Y0" (x)
- :"Yi" (y) );
- }

View File

@@ -48016,7 +48016,6 @@ X86TargetLowering::getConstraintType(StringRef Constraint) const {
default:
break;
case 'z':
- case '0':
return C_Register;
case 'i':
case 'm':
@@ -48080,7 +48079,6 @@ TargetLowering::ConstraintWeight
return CW_Invalid;
// XMM0
case 'z':
- case '0':
if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
@@ -48551,7 +48549,6 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
if (!Subtarget.hasMMX()) break;
return std::make_pair(0U, &X86::VR64RegClass);
case 'z':
- case '0':
if (!Subtarget.hasSSE1()) break;
switch (VT.SimpleTy) {
default: break;

View File

@@ -67,17 +67,3 @@ entry:
ret void
}
- ; Function Attrs: nounwind
- define void @f_Y0(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
- ; xmm0 SSE register
- ; CHECK-LABEL: f_Y0:
- ; CHECK: ## InlineAsm Start
- ; CHECK-NEXT: vpaddq %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm0
- ; CHECK-NEXT: vpaddq %xmm0, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
- ; CHECK: ## InlineAsm End
- entry:
- %0 = tail call { <4 x float>, <4 x float> } asm sideeffect "vpaddq $0,$2,$1\0A\09vpaddq $1,$0,$2\0A\09", "=^Yi,=^Y0,^Yi,0,~{dirflag},~{fpsr},~{flags}"(<4 x float> %y, <4 x float> %z)
- ret void
- }