[clang][AArch64][SVE] Implement conditional operator for SVE vectors

This patch adds support for the conditional (ternary) operator on SVE
scalable vector types in C++, matching the existing behaviour for NEON
vector types. As with NEON vectors, the operator remains disabled in
C mode.
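
At the source level, the change allows C++ code like the following (a minimal usage sketch mirroring the new tests; the function name is illustrative):

#include <arm_sve.h>

// Lane-wise minimum via the conditional operator: each lane of the comparison
// result selects the corresponding lane of a or b.
svint32_t lanewise_min(svint32_t a, svint32_t b) {
  return a < b ? a : b;
}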

Differential Revision: https://reviews.llvm.org/D124091
David Truby 2022-04-20 13:56:19 +00:00
parent 8b42e6d057
commit 8bc29d1427
5 changed files with 355 additions and 2 deletions


@@ -11974,6 +11974,10 @@ public:
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType CheckSizelessVectorConditionalTypes(ExprResult &Cond,
ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,


@@ -4642,7 +4642,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
return tmp5;
}
- if (condExpr->getType()->isVectorType()) {
+ if (condExpr->getType()->isVectorType() ||
+     condExpr->getType()->isVLSTBuiltinType()) {
CGF.incrementProfileCounter(E);
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
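
The IR this path emits (an icmp ne against zero feeding a lane-wise select, as the CHECK lines below verify) is roughly what one would write by hand with the ACLE intrinsics. A hedged C++ equivalent for 32-bit elements, assuming an all-true governing predicate:

#include <arm_sve.h>

// Approximation of the ?: lowering for svint32_t operands: build a per-lane
// predicate from the non-zero lanes of the condition, then select.
svint32_t select_by_hand(svint32_t cond, svint32_t a, svint32_t b) {
  svbool_t pred = svcmpne_n_s32(svptrue_b32(), cond, 0); // lanes where cond != 0
  return svsel_s32(pred, a, b);                          // a where pred, else b
}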


@@ -23,6 +23,7 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/AlignedAllocation.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
@@ -40,6 +41,7 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
using namespace clang;
using namespace sema;
@@ -6108,6 +6110,16 @@ static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
return EltTy->isIntegralType(Ctx);
}
static bool isValidSizelessVectorForConditionalCondition(ASTContext &Ctx,
QualType CondTy) {
if (!CondTy->isVLSTBuiltinType())
return false;
const QualType EltTy =
cast<BuiltinType>(CondTy.getCanonicalType())->getSveEltType(Ctx);
assert(!EltTy->isEnumeralType() && "Vectors can't be enum types");
return EltTy->isIntegralType(Ctx);
}
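
In user terms, the first operand of ?: must itself be a sizeless vector with integral elements; comparison results such as a < b qualify, while a floating-point-element condition fails the isIntegralType check. A short sketch grounded in the CodeGen tests below:

#include <arm_sve.h>

// Accepted: the condition (a < b) is a sizeless vector with integral elements.
svint32_t pick_smaller(svint32_t a, svint32_t b) {
  return a < b ? a : b;
}

// An svfloat32_t used directly as the condition would fail the integral-element
// check above and is expected to be rejected (not exercised by this patch's tests).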
QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc) {
@@ -6199,6 +6211,89 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
return ResultType;
}
QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond,
ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc) {
LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
QualType CondType = Cond.get()->getType();
const auto *CondBT = CondType->castAs<BuiltinType>();
QualType CondElementTy = CondBT->getSveEltType(Context);
llvm::ElementCount CondElementCount =
Context.getBuiltinVectorTypeInfo(CondBT).EC;
QualType LHSType = LHS.get()->getType();
const auto *LHSBT =
LHSType->isVLSTBuiltinType() ? LHSType->getAs<BuiltinType>() : nullptr;
QualType RHSType = RHS.get()->getType();
const auto *RHSBT =
RHSType->isVLSTBuiltinType() ? RHSType->getAs<BuiltinType>() : nullptr;
QualType ResultType;
if (LHSBT && RHSBT) {
// If both are sizeless vector types, they must be the same type.
if (!Context.hasSameType(LHSType, RHSType)) {
Diag(QuestionLoc, diag::err_conditional_vector_mismatched)
<< LHSType << RHSType;
return QualType();
}
ResultType = LHSType;
} else if (LHSBT || RHSBT) {
ResultType = CheckSizelessVectorOperands(
LHS, RHS, QuestionLoc, /*IsCompAssign*/ false, ACK_Conditional);
if (ResultType.isNull())
return QualType();
} else {
// Both are scalar so splat
QualType ResultElementTy;
LHSType = LHSType.getCanonicalType().getUnqualifiedType();
RHSType = RHSType.getCanonicalType().getUnqualifiedType();
if (Context.hasSameType(LHSType, RHSType))
ResultElementTy = LHSType;
else
ResultElementTy =
UsualArithmeticConversions(LHS, RHS, QuestionLoc, ACK_Conditional);
if (ResultElementTy->isEnumeralType()) {
Diag(QuestionLoc, diag::err_conditional_vector_operand_type)
<< ResultElementTy;
return QualType();
}
ResultType = Context.getScalableVectorType(
ResultElementTy, CondElementCount.getKnownMinValue());
LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat);
RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat);
}
assert(!ResultType.isNull() && ResultType->isVLSTBuiltinType() &&
"Result should have been a vector type");
auto *ResultBuiltinTy = ResultType->castAs<BuiltinType>();
QualType ResultElementTy = ResultBuiltinTy->getSveEltType(Context);
llvm::ElementCount ResultElementCount =
Context.getBuiltinVectorTypeInfo(ResultBuiltinTy).EC;
if (ResultElementCount != CondElementCount) {
Diag(QuestionLoc, diag::err_conditional_vector_size)
<< CondType << ResultType;
return QualType();
}
if (Context.getTypeSize(ResultElementTy) !=
Context.getTypeSize(CondElementTy)) {
Diag(QuestionLoc, diag::err_conditional_vector_element_size)
<< CondType << ResultType;
return QualType();
}
return ResultType;
}
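
The three branches above correspond to source patterns like the following (a hedged sketch; the vector/scalar and scalar/scalar cases mirror the cond_*_splat CodeGen tests added below):

#include <arm_sve.h>

// Both results are the same SVE type: the result has that type.
svint32_t both_vectors(svint32_t c, svint32_t a, svint32_t b) {
  return c ? a : b;
}

// One vector, one scalar: CheckSizelessVectorOperands converts and splats the
// scalar side to match the vector operand (as in the cond_i32_splat test).
svint32_t vector_and_scalar(svint32_t c, svint32_t a) {
  return c ? a : 0;
}

// Both scalars: the usual arithmetic conversions pick an element type, which is
// then splatted to a scalable vector matching the condition's element count.
svint32_t both_scalars(svint32_t c) {
  return c ? 1 : 0;
}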
/// Check the operands of ?: under C++ semantics.
///
/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
@@ -6232,10 +6327,14 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
bool IsVectorConditional =
isValidVectorForConditionalCondition(Context, Cond.get()->getType());
bool IsSizelessVectorConditional =
isValidSizelessVectorForConditionalCondition(Context,
Cond.get()->getType());
// C++11 [expr.cond]p1
// The first expression is contextually converted to bool.
if (!Cond.get()->isTypeDependent()) {
- ExprResult CondRes = IsVectorConditional
+ ExprResult CondRes = IsVectorConditional || IsSizelessVectorConditional
? DefaultFunctionArrayLvalueConversion(Cond.get())
: CheckCXXBooleanCondition(Cond.get());
if (CondRes.isInvalid())
@@ -6304,6 +6403,9 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (IsVectorConditional)
return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
if (IsSizelessVectorConditional)
return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
// C++11 [expr.cond]p3
// Otherwise, if the second and third operand have different types, and
// either has (cv) class type [...] an attempt is made to convert each of
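
Skipping the contextual conversion to bool matters because sizeless vector types provide no conversion to bool at all; the condition is only lvalue-converted and then consumed lane by lane. A hedged illustration:

#include <arm_sve.h>

svint32_t lanewise_pick(svint32_t c, svint32_t a, svint32_t b) {
  // if (c) { ... }   // a scalar boolean context like this would need a
  //                  // conversion to bool, which SVE vectors do not provide
  return c ? a : b;   // fine: dispatched to CheckSizelessVectorConditionalTypes
}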


@@ -0,0 +1,224 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone \
// RUN: -emit-llvm -o - %s | opt -S -sroa | FileCheck %s
// REQUIRES: aarch64-registered-target
#include <arm_sve.h>
// CHECK-LABEL: @_Z9cond_boolu10__SVBool_tu10__SVBool_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 16 x i1> [[CMP]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[VECTOR_COND]], <vscale x 16 x i1> [[A]], <vscale x 16 x i1> [[B]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[VECTOR_SELECT]]
//
svbool_t cond_bool(svbool_t a, svbool_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z7cond_i8u10__SVInt8_tu10__SVInt8_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 16 x i8> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[VECTOR_COND]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[VECTOR_SELECT]]
//
svint8_t cond_i8(svint8_t a, svint8_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z7cond_u8u11__SVUint8_tu11__SVUint8_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 16 x i8> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[VECTOR_COND]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[VECTOR_SELECT]]
//
svuint8_t cond_u8(svuint8_t a, svuint8_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_i16u11__SVInt16_tu11__SVInt16_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 8 x i16> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 8 x i1> [[VECTOR_COND]], <vscale x 8 x i16> [[A]], <vscale x 8 x i16> [[B]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[VECTOR_SELECT]]
//
svint16_t cond_i16(svint16_t a, svint16_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_u16u12__SVUint16_tu12__SVUint16_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 8 x i16> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 8 x i1> [[VECTOR_COND]], <vscale x 8 x i16> [[A]], <vscale x 8 x i16> [[B]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[VECTOR_SELECT]]
//
svuint16_t cond_u16(svuint16_t a, svuint16_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_i32u11__SVInt32_tu11__SVInt32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
//
svint32_t cond_i32(svint32_t a, svint32_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_u32u12__SVUint32_tu12__SVUint32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
//
svuint32_t cond_u32(svuint32_t a, svuint32_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_i64u11__SVInt64_tu11__SVInt64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> [[B]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
//
svint64_t cond_i64(svint64_t a, svint64_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_u64u12__SVUint64_tu12__SVUint64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> [[B]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
//
svuint64_t cond_u64(svuint64_t a, svuint64_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_f16u13__SVFloat16_tu13__SVFloat16_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 8 x i16> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 8 x i1> [[VECTOR_COND]], <vscale x 8 x half> [[A]], <vscale x 8 x half> [[B]]
// CHECK-NEXT: ret <vscale x 8 x half> [[VECTOR_SELECT]]
//
svfloat16_t cond_f16(svfloat16_t a, svfloat16_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_f32u13__SVFloat32_tu13__SVFloat32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x float> [[A]], <vscale x 4 x float> [[B]]
// CHECK-NEXT: ret <vscale x 4 x float> [[VECTOR_SELECT]]
//
svfloat32_t cond_f32(svfloat32_t a, svfloat32_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z8cond_f64u13__SVFloat64_tu13__SVFloat64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x double> [[A]], <vscale x 2 x double> [[B]]
// CHECK-NEXT: ret <vscale x 2 x double> [[VECTOR_SELECT]]
//
svfloat64_t cond_f64(svfloat64_t a, svfloat64_t b) {
return a < b ? a : b;
}
// CHECK-LABEL: @_Z14cond_i32_splatu11__SVInt32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
//
svint32_t cond_i32_splat(svint32_t a) {
return a < 0 ? a : 0;
}
// CHECK-LABEL: @_Z14cond_u32_splatu12__SVUint32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
//
svuint32_t cond_u32_splat(svuint32_t a) {
return a < 1u ? a : 1u;
}
// CHECK-LABEL: @_Z14cond_i64_splatu11__SVInt64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
//
svint64_t cond_i64_splat(svint64_t a) {
return a < 0l ? a : 0l;
}
// CHECK-LABEL: @_Z14cond_u64_splatu12__SVUint64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
//
svuint64_t cond_u64_splat(svuint64_t a) {
return a < 1ul ? a : 1ul;
}
// CHECK-LABEL: @_Z14cond_f32_splatu13__SVFloat32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x float> [[A]], <vscale x 4 x float> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x float> [[VECTOR_SELECT]]
//
svfloat32_t cond_f32_splat(svfloat32_t a) {
return a < 0.f ? a : 0.f;
}
// CHECK-LABEL: @_Z14cond_f64_splatu13__SVFloat64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
// CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x double> [[A]], <vscale x 2 x double> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[VECTOR_SELECT]]
//
svfloat64_t cond_f64_splat(svfloat64_t a) {
return a < 0. ? a : 0.;
}


@@ -0,0 +1,22 @@
// RUN: %clang_cc1 -verify -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only %s
// REQUIRES: aarch64-registered-target
#include <arm_sve.h>
void cond(svint8_t i8, svint16_t i16, svint32_t i32, svint64_t i64,
svuint8_t u8, svuint16_t u16, svuint32_t u32, svuint64_t u64,
svfloat16_t f16, svfloat32_t f32, svfloat64_t f64,
svbool_t b) {
(void) i8 < i8 ? i16 : i16; // expected-error{{invalid operands to binary expression}}
(void) i8 < i8 ? i32 : i32; // expected-error{{invalid operands to binary expression}}
(void) i8 < i8 ? i64 : i64; // expected-error{{invalid operands to binary expression}}
(void) i16 < i16 ? i16 : i8; // expected-error{{invalid operands to binary expression}}
(void) i16 < i16 ? i16 : i32; // expected-error{{invalid operands to binary expression}}
(void) i16 < i16 ? i16 : i64; // expected-error{{invalid operands to binary expression}}
(void) i16 < i16 ? i8 : i16; // expected-error{{invalid operands to binary expression}}
(void) i16 < i16 ? i32 : i16; // expected-error{{invalid operands to binary expression}}
(void) i16 < i16 ? i64 : i16; // expected-error{{invalid operands to binary expression}}
}
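
The element-count and element-size diagnostics added in CheckSizelessVectorConditionalTypes (err_conditional_vector_size, err_conditional_vector_element_size) are not covered here; a hypothetical case expected to trip the count check, shown commented out since it does not compile:

#include <arm_sve.h>

// Not part of the committed test. An svbool_t condition carries 16 predicate
// lanes per 128-bit granule, while an svint32_t result carries 4, so the
// element-count check is expected to reject this:
//
//   svint32_t bad(svbool_t b, svint32_t a) { return b ? a : a; }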