[SVE][Inline-Asm] Add constraints for SVE ACLE types

Summary:
Adds the constraints described below to ensure that we
can tie variables of SVE ACLE types to operands in inline-asm:
 - y: SVE registers Z0-Z7
 - Upl: One of the low eight SVE predicate registers (P0-P7)
 - Upa: Full range of SVE predicate registers (P0-P15)

Reviewers: sdesmalen, huntergr, rovka, cameron.mcinally, efriedma, rengolin

Reviewed By: efriedma

Subscribers: miyuki, tschuett, rkruppe, psnobl, cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D75690
Kerry McLaughlin, 2020-03-17 10:27:29 +00:00
commit af64948e2a (parent 06489eaa92)
7 changed files with 340 additions and 11 deletions
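For illustration, here is a minimal sketch of how user code can tie SVE ACLE values to the new constraints (the function names are hypothetical; the operand forms mirror the tests added below):

// "Upa" accepts any SVE predicate register (p0-p15), "Upl" only the low
// eight (p0-p7), and "y" restricts a data vector to z0-z7; "w" remains
// the unrestricted vector constraint.
__SVBool_t interleave(__SVBool_t a, __SVBool_t b) {
  __SVBool_t r;
  asm("zip1 %0.b, %1.b, %2.b" : "=Upa"(r) : "Upa"(a), "Upa"(b));
  return r;
}

__SVFloat32_t mul_low(__SVFloat32_t a, __SVFloat32_t b) {
  __SVFloat32_t r;
  asm("fmul %0.s, %1.s, %2.s" : "=w"(r) : "w"(a), "y"(b));
  return r;
}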


@@ -486,17 +486,29 @@ bool AArch64TargetInfo::validateAsmConstraint(
     Info.setAllowsRegister();
     return true;
   case 'U':
+    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
+      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
+      Info.setAllowsRegister();
+      Name += 2;
+      return true;
+    }
     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
     // Utf: A memory address suitable for ldp/stp in TF mode.
     // Usa: An absolute symbolic address.
     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
-    llvm_unreachable("FIXME: Unimplemented support for U* constraints.");
+    // Better to return an error saying that it's an unrecognised constraint
+    // even if this is a valid constraint in gcc.
+    return false;
   case 'z': // Zero register, wzr or xzr
     Info.setAllowsRegister();
     return true;
   case 'x': // Floating point and SIMD registers (V0-V15)
     Info.setAllowsRegister();
     return true;
+  case 'y': // SVE registers (V0-V7)
+    Info.setAllowsRegister();
+    return true;
   }
   return false;
 }
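The Name += 2 above follows the validateAsmConstraint contract: the validator leaves Name on the last character it consumed, and the caller's loop advances one more character before reading the next constraint. A standalone sketch of that contract (a hypothetical driver, not the Clang loop itself):

#include <cassert>

// Sketch: the validator leaves Name on the final character of "Upl"/"Upa";
// the caller's increment then steps past the whole three-character token.
static bool validateSVEPredicate(const char *&Name) {
  if (Name[0] == 'U' && Name[1] == 'p' &&
      (Name[2] == 'l' || Name[2] == 'a')) {
    Name += 2; // now points at 'l'/'a'
    return true;
  }
  return false;
}

int main() {
  const char *P = "Upaw"; // "Upa" followed by a 'w' constraint
  assert(validateSVEPredicate(P));
  ++P; // the caller's per-iteration increment
  assert(*P == 'w');
}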


@@ -87,6 +87,21 @@ public:
   ArrayRef<const char *> getGCCRegNames() const override;
   ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+
+  std::string convertConstraint(const char *&Constraint) const override {
+    std::string R;
+    switch (*Constraint) {
+    case 'U': // Three-character constraint; add "@3" hint for later parsing.
+      R = std::string("@3") + std::string(Constraint, 3);
+      Constraint += 2;
+      break;
+    default:
+      R = std::string(1, *Constraint);
+      break;
+    }
+    return R;
+  }
+
   bool validateAsmConstraint(const char *&Name,
                              TargetInfo::ConstraintInfo &Info) const override;
   bool

@@ -4496,8 +4496,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   // Update the largest vector width if any arguments have vector types.
   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
-      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getFixedSize());
+      LargestVectorWidth =
+          std::max((uint64_t)LargestVectorWidth,
+                   VT->getPrimitiveSizeInBits().getKnownMinSize());
   }

   // Compute the calling convention and attributes.
@@ -4611,8 +4612,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   // Update largest vector width from the return type.
   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
-    LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                  VT->getPrimitiveSizeInBits().getFixedSize());
+    LargestVectorWidth =
+        std::max((uint64_t)LargestVectorWidth,
+                 VT->getPrimitiveSizeInBits().getKnownMinSize());

   // Insert instrumentation or attach profile metadata at indirect call sites.
   // For more details, see the comment before the definition of


@@ -2095,8 +2095,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       // Update largest vector width for any vector types.
       if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
-        LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                      VT->getPrimitiveSizeInBits().getFixedSize());
+        LargestVectorWidth =
+            std::max((uint64_t)LargestVectorWidth,
+                     VT->getPrimitiveSizeInBits().getKnownMinSize());
     } else {
       ArgTypes.push_back(Dest.getAddress(*this).getType());
       Args.push_back(Dest.getPointer(*this));
@@ -2120,8 +2121,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       // Update largest vector width for any vector types.
       if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
-        LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                      VT->getPrimitiveSizeInBits().getFixedSize());
+        LargestVectorWidth =
+            std::max((uint64_t)LargestVectorWidth,
+                     VT->getPrimitiveSizeInBits().getKnownMinSize());
       if (Info.allowsRegister())
         InOutConstraints += llvm::utostr(i);
       else
@@ -2207,8 +2209,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
     // Update largest vector width for any vector types.
     if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
-      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getFixedSize());
+      LargestVectorWidth =
+          std::max((uint64_t)LargestVectorWidth,
+                   VT->getPrimitiveSizeInBits().getKnownMinSize());
     ArgTypes.push_back(Arg->getType());
     Args.push_back(Arg);
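These hunks all make the same change: an SVE type such as <vscale x 16 x i8> has no fixed size at compile time (its size is 128 bits times the runtime vscale), so getFixedSize() would assert on it, while getKnownMinSize() returns the guaranteed minimum. A minimal sketch of the distinction (this mimics, but is not, llvm::TypeSize):

#include <cassert>
#include <cstdint>

struct TypeSizeSketch {
  uint64_t MinBits; // 128 for <vscale x 16 x i8>
  bool Scalable;    // true for SVE ACLE types
  uint64_t getFixedSize() const {
    assert(!Scalable && "scalable vectors have no fixed size");
    return MinBits;
  }
  uint64_t getKnownMinSize() const { return MinBits; } // safe for both
};

int main() {
  TypeSizeSketch SVEVec{128, true};
  assert(SVEVec.getKnownMinSize() == 128); // fine
  // SVEVec.getFixedSize() would assert at run time here.
}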


@@ -0,0 +1,24 @@
// REQUIRES: aarch64-registered-target

// RUN: not %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns \
// RUN: -target-feature +neon -S -O1 -o - %s 2>&1 | FileCheck %s

// Set a vector constraint for an SVE predicate register.
// As the wrong constraint is used for an __SVBool_t,
// the compiler will try to extend the nxv16i1 to an nxv16i8.
// TODO: We don't have patterns for this yet, but once they are added this
// test should be updated to check for an assembler error.

__SVBool_t funcB1(__SVBool_t in)
{
  __SVBool_t ret;

  asm volatile (
    "mov %[ret].b, %[in].b \n"
    : [ret] "=w" (ret)
    : [in] "w" (in)
    :);

  return ret;
}

// CHECK: funcB1
// CHECK-ERROR: fatal error: error in backend: Cannot select
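For contrast, the same move selects cleanly once the predicate operands carry the matching constraint, as the datatypes test below exercises (a sketch, not part of the commit):

__SVBool_t funcB1_fixed(__SVBool_t in) {
  __SVBool_t ret;
  // With "Upa" the operands stay nxv16i1 predicates, so no illegal
  // nxv16i1 -> nxv16i8 extension is attempted.
  asm volatile("mov %[ret].b, %[in].b"
               : [ret] "=Upa"(ret)
               : [in] "Upa"(in));
  return ret;
}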


@@ -0,0 +1,252 @@
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns \
// RUN: -target-feature +neon -S -O1 -o - -emit-llvm %s | FileCheck %s

// Tests to check that all SVE datatypes can be passed in as input operands
// and passed out as output operands.

#define SVINT_TEST(DT, KIND)\
DT func_int_##DT##KIND(DT in)\
{\
  DT out;\
  asm volatile (\
    "ptrue p0.b\n"\
    "mov %[out]." #KIND ", p0/m, %[in]." #KIND "\n"\
    : [out] "=w" (out)\
    : [in] "w" (in)\
    : "p0"\
    );\
  return out;\
}
SVINT_TEST(__SVUint8_t,b);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVUint8_t,h);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVUint8_t,s);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVUint8_t,d);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVUint16_t,b);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVUint16_t,h);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVUint16_t,s);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVUint16_t,d);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVUint32_t,b);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVUint32_t,h);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVUint32_t,s);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVUint32_t,d);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVUint64_t,b);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
SVINT_TEST(__SVUint64_t,h);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
SVINT_TEST(__SVUint64_t,s);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
SVINT_TEST(__SVUint64_t,d);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
SVINT_TEST(__SVInt8_t,b);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVInt8_t,h);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVInt8_t,s);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVInt8_t,d);
// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
SVINT_TEST(__SVInt16_t,b);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVInt16_t,h);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVInt16_t,s);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVInt16_t,d);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
SVINT_TEST(__SVInt32_t,b);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVInt32_t,h);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVInt32_t,s);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVInt32_t,d);
// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
SVINT_TEST(__SVInt64_t,b);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
SVINT_TEST(__SVInt64_t,h);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
SVINT_TEST(__SVInt64_t,s);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
SVINT_TEST(__SVInt64_t,d);
// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
// Test that floats can also be used as datatypes for integer instructions,
// and check all the variants which would not be possible with a float
// instruction.
SVINT_TEST(__SVFloat16_t,b);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
SVINT_TEST(__SVFloat16_t,h);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
SVINT_TEST(__SVFloat16_t,s);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
SVINT_TEST(__SVFloat16_t,d);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
SVINT_TEST(__SVFloat32_t,b);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
SVINT_TEST(__SVFloat32_t,h);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
SVINT_TEST(__SVFloat32_t,s);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
SVINT_TEST(__SVFloat32_t,d);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
SVINT_TEST(__SVFloat64_t,b);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
SVINT_TEST(__SVFloat64_t,h);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
SVINT_TEST(__SVFloat64_t,s);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
SVINT_TEST(__SVFloat64_t,d);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
#define SVBOOL_TEST(KIND)\
__SVBool_t func_bool_##KIND(__SVBool_t in1, __SVBool_t in2)\
{\
  __SVBool_t out;\
  asm volatile (\
    "zip1 %[out]." #KIND ", %[in1]." #KIND ", %[in2]." #KIND "\n"\
    : [out] "=Upa" (out)\
    : [in1] "Upa" (in1),\
      [in2] "Upa" (in2)\
    :);\
  return out;\
}
SVBOOL_TEST(b) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.b, $1.b, $2.b\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
SVBOOL_TEST(h) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.h, $1.h, $2.h\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
SVBOOL_TEST(s) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.s, $1.s, $2.s\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
SVBOOL_TEST(d) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.d, $1.d, $2.d\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
#define SVBOOL_TEST_UPL(DT, KIND)\
__SVBool_t func_bool_upl_##KIND(__SVBool_t in1, DT in2, DT in3)\
{\
  __SVBool_t out;\
  asm volatile (\
    "fadd %[out]." #KIND ", %[in1]." #KIND ", %[in2]." #KIND ", %[in3]." #KIND "\n"\
    : [out] "=w" (out)\
    : [in1] "Upl" (in1),\
      [in2] "w" (in2),\
      [in3] "w" (in3)\
    :);\
  return out;\
}
SVBOOL_TEST_UPL(__SVInt8_t, b) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.b, $1.b, $2.b, $3.b\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 16 x i8> %in2, <vscale x 16 x i8> %in3)
SVBOOL_TEST_UPL(__SVInt16_t, h) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.h, $1.h, $2.h, $3.h\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 8 x i16> %in2, <vscale x 8 x i16> %in3)
SVBOOL_TEST_UPL(__SVInt32_t, s) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.s, $1.s, $2.s, $3.s\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 4 x i32> %in2, <vscale x 4 x i32> %in3)
SVBOOL_TEST_UPL(__SVInt64_t, d) ;
// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.d, $1.d, $2.d, $3.d\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 2 x i64> %in2, <vscale x 2 x i64> %in3)
#define SVFLOAT_TEST(DT,KIND)\
DT func_float_##DT##KIND(DT inout1, DT in2)\
{\
  asm volatile (\
    "ptrue p0." #KIND ", #1 \n"\
    "fsub %[inout1]." #KIND ", p0/m, %[inout1]." #KIND ", %[in2]." #KIND "\n"\
    : [inout1] "=w" (inout1)\
    : "[inout1]" (inout1),\
      [in2] "w" (in2)\
    : "p0");\
  return inout1;\
}\

SVFLOAT_TEST(__SVFloat16_t,s);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.s, #1 \0Afsub $0.s, p0/m, $0.s, $2.s\0A", "=w,0,w,~{p0}"(<vscale x 8 x half> %inout1, <vscale x 8 x half> %in2)
SVFLOAT_TEST(__SVFloat16_t,d);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.d, #1 \0Afsub $0.d, p0/m, $0.d, $2.d\0A", "=w,0,w,~{p0}"(<vscale x 8 x half> %inout1, <vscale x 8 x half> %in2)
SVFLOAT_TEST(__SVFloat32_t,s);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.s, #1 \0Afsub $0.s, p0/m, $0.s, $2.s\0A", "=w,0,w,~{p0}"(<vscale x 4 x float> %inout1, <vscale x 4 x float> %in2)
SVFLOAT_TEST(__SVFloat32_t,d);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.d, #1 \0Afsub $0.d, p0/m, $0.d, $2.d\0A", "=w,0,w,~{p0}"(<vscale x 4 x float> %inout1, <vscale x 4 x float> %in2)
SVFLOAT_TEST(__SVFloat64_t,s);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.s, #1 \0Afsub $0.s, p0/m, $0.s, $2.s\0A", "=w,0,w,~{p0}"(<vscale x 2 x double> %inout1, <vscale x 2 x double> %in2)
SVFLOAT_TEST(__SVFloat64_t,d);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.d, #1 \0Afsub $0.d, p0/m, $0.d, $2.d\0A", "=w,0,w,~{p0}"(<vscale x 2 x double> %inout1, <vscale x 2 x double> %in2)
#define SVFLOAT_TEST_Y(DT, KIND)\
__SVBool_t func_float_y_##KIND(DT in1, DT in2)\
{\
  __SVBool_t out;\
  asm volatile (\
    "fmul %[out]." #KIND ", %[in1]." #KIND ", %[in2]." #KIND "\n"\
    : [out] "=w" (out)\
    : [in1] "w" (in1),\
      [in2] "y" (in2)\
    :);\
  return out;\
}
SVFLOAT_TEST_Y(__SVFloat16_t,h);
// CHECK: call <vscale x 16 x i1> asm sideeffect "fmul $0.h, $1.h, $2.h\0A", "=w,w,y"(<vscale x 8 x half> %in1, <vscale x 8 x half> %in2)
SVFLOAT_TEST_Y(__SVFloat32_t,s);
// CHECK: call <vscale x 16 x i1> asm sideeffect "fmul $0.s, $1.s, $2.s\0A", "=w,w,y"(<vscale x 4 x float> %in1, <vscale x 4 x float> %in2)
SVFLOAT_TEST_Y(__SVFloat64_t,d);
// CHECK: call <vscale x 16 x i1> asm sideeffect "fmul $0.d, $1.d, $2.d\0A", "=w,w,y"(<vscale x 2 x double> %in1, <vscale x 2 x double> %in2)
// Another test for floats to include h suffix
#define SVFLOAT_CVT_TEST(DT1,KIND1,DT2,KIND2)\
DT1 func_float_cvt_##DT1##KIND1##DT2##KIND2(DT2 in1)\
{\
  DT1 out1;\
  asm volatile (\
    "ptrue p0." #KIND2 ", #1 \n"\
    "fcvt %[out1]." #KIND1 ", p0/m, %[in1]." #KIND2 "\n"\
    : [out1] "=w" (out1)\
    : [in1] "w" (in1)\
    : "p0");\
  return out1;\
}\

SVFLOAT_CVT_TEST(__SVFloat64_t,d,__SVFloat32_t,s);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.d, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in1)
SVFLOAT_CVT_TEST(__SVFloat64_t,d,__SVFloat16_t,h);
// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.h, #1 \0Afcvt $0.d, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in1)
SVFLOAT_CVT_TEST(__SVFloat32_t,s,__SVFloat16_t,h);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.h, #1 \0Afcvt $0.s, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in1)
SVFLOAT_CVT_TEST(__SVFloat32_t,s,__SVFloat64_t,d);
// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.s, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in1)
SVFLOAT_CVT_TEST(__SVFloat16_t,h,__SVFloat64_t,d);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.h, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in1)
SVFLOAT_CVT_TEST(__SVFloat16_t,h,__SVFloat32_t,s);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.h, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in1)
// Test a mix of floats and ints.
SVFLOAT_CVT_TEST(__SVInt16_t,h,__SVFloat32_t,s);
// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.h, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in1)
SVFLOAT_CVT_TEST(__SVFloat16_t,s,__SVUint32_t,d);
// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.s, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in1)


@@ -0,0 +1,21 @@
// REQUIRES: aarch64-registered-target

// RUN: not %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns \
// RUN: -target-feature +neon -S -O1 -o - %s | FileCheck %s

// Assembler error.
// Output constraint: set a vector constraint on an integer.

__SVFloat32_t funcB2()
{
  __SVFloat32_t ret;

  asm volatile (
    "fmov %[ret], wzr \n"
    : [ret] "=w" (ret)
    :
    :);

  return ret;
}
// CHECK: funcB2
// CHECK-ERROR: error: invalid operand for instruction