ARM64: initial clang support commit.

This adds Clang support for the ARM64 backend. There are definitely
still some rough edges, so please bring up any issues you see with
this patch.

As with the LLVM commit though, we think it'll be more useful for
merging with AArch64 from within the tree.

llvm-svn: 205100
Tim Northover 2014-03-29 15:09:45 +00:00
parent af3698066a
commit a2ee433c8d
102 changed files with 9431 additions and 664 deletions


@@ -1546,7 +1546,7 @@ instructions for implementing atomic operations.
void __builtin_arm_clrex(void);
The types ``T`` currently supported are:
* Integer types with width at most 64 bits.
* Integer types with width at most 64 bits (or 128 bits on ARM64).
* Floating-point types
* Pointer types.
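
As a quick illustration of the list above, here is a minimal sketch using the exclusive builtins (the function name is illustrative; on ARM64 the same pattern is also accepted for 128-bit integers):

// Atomically increment *p with the exclusive-access builtins.
long atomic_increment(long *p) {
  long val;
  do {
    val = __builtin_arm_ldrex(p);             // load-exclusive
  } while (__builtin_arm_strex(val + 1, p));  // returns 0 on success, 1 if the reservation was lost
  return val + 1;
}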


@@ -0,0 +1,34 @@
//===--- BuiltinsARM64.def - ARM64 Builtin function database ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM64-specific builtin function database. Users of
// this file must define the BUILTIN macro to make use of this information.
//
//===----------------------------------------------------------------------===//
// The format of this database matches clang/Basic/Builtins.def.
// In libgcc
BUILTIN(__clear_cache, "vv*v*", "")
BUILTIN(__builtin_arm_ldrex, "v.", "t")
BUILTIN(__builtin_arm_strex, "i.", "t")
BUILTIN(__builtin_arm_clrex, "v", "")
// CRC32
BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc")
BUILTIN(__builtin_arm_crc32cb, "UiUiUc", "nc")
BUILTIN(__builtin_arm_crc32h, "UiUiUs", "nc")
BUILTIN(__builtin_arm_crc32ch, "UiUiUs", "nc")
BUILTIN(__builtin_arm_crc32w, "UiUiUi", "nc")
BUILTIN(__builtin_arm_crc32cw, "UiUiUi", "nc")
BUILTIN(__builtin_arm_crc32d, "UiUiLUi", "nc")
BUILTIN(__builtin_arm_crc32cd, "UiUiLUi", "nc")
#undef BUILTIN
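
For reference, a consumer of this file supplies its own BUILTIN expansion before including it (the .def #undefs the macro at the end). A minimal sketch with a hypothetical table name, mirroring how Targets.cpp builds its Builtin::Info arrays later in this patch:

#define BUILTIN(ID, TYPE, ATTRS) #ID,
static const char *const ARM64BuiltinNames[] = {
#include "clang/Basic/BuiltinsARM64.def"
};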


@@ -0,0 +1,21 @@
//===--- BuiltinsNEON.def - NEON Builtin function database ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the NEON-specific builtin function database. Users of
// this file must define the BUILTIN macro to make use of this information.
//
//===----------------------------------------------------------------------===//
// The format of this database matches clang/Basic/Builtins.def.
#define GET_NEON_BUILTINS
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTINS
#undef BUILTIN


@@ -30,5 +30,6 @@ clang_tablegen(AttrList.inc -gen-clang-attr-list
# ARM NEON
clang_tablegen(arm_neon.inc -gen-arm-neon-sema
-I ${CMAKE_CURRENT_SOURCE_DIR}/../../
SOURCE arm_neon.td
TARGET ClangARMNeon)


@@ -50,7 +50,8 @@ $(ObjDir)/AttrList.inc.tmp : Attr.td $(CLANG_TBLGEN) $(ObjDir)/.dir
$(ObjDir)/arm_neon.inc.tmp : arm_neon.td $(CLANG_TBLGEN) $(ObjDir)/.dir
$(Echo) "Building Clang arm_neon.inc with tblgen"
$(Verb) $(ClangTableGen) -gen-arm-neon-sema -o $(call SYSPATH, $@) $<
$(Verb) $(ClangTableGen) -gen-arm-neon-sema -o $(call SYSPATH, $@) \
-I $(PROJ_SRC_DIR)/../.. $<
$(ObjDir)/Version.inc.tmp : Version.inc.in Makefile $(LLVM_OBJ_ROOT)/Makefile.config $(ObjDir)/.dir
$(Echo) "Updating Clang version info."


@@ -22,14 +22,12 @@
namespace clang {
namespace NEON {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
#define GET_NEON_BUILTINS
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTINS
FirstTSBuiltin
};
#include "clang/Basic/BuiltinsNEON.def"
FirstTSBuiltin
};
}
/// \brief AArch64 builtins
@@ -53,6 +51,17 @@ namespace clang {
};
}
/// \brief ARM64 builtins
namespace ARM64 {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
#include "clang/Basic/BuiltinsARM64.def"
LastTSBuiltin
};
}
/// \brief PPC builtins
namespace PPC {
enum {


@@ -63,6 +63,14 @@ public:
/// - constructor/destructor signatures.
iOS,
/// The iOS 64-bit ABI follows ARM's published 64-bit ABI more
/// closely, but we don't guarantee to follow it perfectly.
///
/// It is documented here:
/// http://infocenter.arm.com
/// /help/topic/com.arm.doc.ihi0059a/IHI0059A_cppabi64.pdf
iOS64,
/// The generic AArch64 ABI is also a modified version of the Itanium ABI,
/// but it has fewer divergences than the 32-bit ARM ABI.
///
@@ -105,6 +113,7 @@ public:
case GenericItanium:
case GenericARM:
case iOS:
case iOS64:
return true;
case Microsoft:
@@ -120,6 +129,7 @@ public:
case GenericItanium:
case GenericARM:
case iOS:
case iOS64:
return false;
case Microsoft:
@@ -195,6 +205,7 @@ public:
bool canKeyFunctionBeInline() const {
switch (getKind()) {
case GenericARM:
case iOS64:
return false;
case GenericAArch64:
@@ -248,6 +259,11 @@ public:
case iOS:
return UseTailPaddingUnlessPOD03;
// iOS on ARM64 uses the C++11 POD rules. It does not honor the
// Itanium exception about classes with over-large bitfields.
case iOS64:
return UseTailPaddingUnlessPOD11;
// MSVC always allocates fields in the tail-padding of a base class
// subobject, even if they're POD.
case Microsoft:
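
To see what the tail-padding distinction means in practice, consider this sketch (sizes assume a typical LP64 target with 4-byte int):

struct PodBase { int i; char c; };           // POD under both C++03 and C++11
struct D1 : PodBase { char d; };             // tail padding not reused: sizeof(D1) == 12
struct NonPod { int i; char c; NonPod(); };  // non-POD: Itanium may reuse tail padding
struct D2 : NonPod { char d; };              // d may land at offset 5: sizeof(D2) == 8

UseTailPaddingUnlessPOD11 applies the same reuse test but with the C++11 definition of POD, which is what iOS64 selects above.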


@@ -7941,9 +7941,12 @@ private:
ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARM64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);


@@ -678,6 +678,7 @@ CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
switch (T.getCXXABI().getKind()) {
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
return CreateARMCXXABI(*this);
case TargetCXXABI::GenericAArch64: // Same as Itanium at this level
case TargetCXXABI::GenericItanium:
@@ -7907,6 +7908,7 @@ MangleContext *ASTContext::createMangleContext() {
case TargetCXXABI::GenericItanium:
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
return ItaniumMangleContext::create(*this, getDiagnostics());
case TargetCXXABI::Microsoft:
return MicrosoftMangleContext::create(*this, getDiagnostics());


@@ -2153,8 +2153,17 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
const char *EltName = 0;
if (T->getVectorKind() == VectorType::NeonPolyVector) {
switch (cast<BuiltinType>(EltType)->getKind()) {
case BuiltinType::SChar: EltName = "poly8_t"; break;
case BuiltinType::Short: EltName = "poly16_t"; break;
case BuiltinType::SChar:
case BuiltinType::UChar:
EltName = "poly8_t";
break;
case BuiltinType::Short:
case BuiltinType::UShort:
EltName = "poly16_t";
break;
case BuiltinType::ULongLong:
EltName = "poly64_t";
break;
default: llvm_unreachable("unexpected Neon polynomial vector element type");
}
} else {
@@ -2167,6 +2176,7 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
case BuiltinType::UInt: EltName = "uint32_t"; break;
case BuiltinType::LongLong: EltName = "int64_t"; break;
case BuiltinType::ULongLong: EltName = "uint64_t"; break;
case BuiltinType::Double: EltName = "float64_t"; break;
case BuiltinType::Float: EltName = "float32_t"; break;
case BuiltinType::Half: EltName = "float16_t";break;
default:
@@ -2195,6 +2205,7 @@ static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
case BuiltinType::Int:
return "Int32";
case BuiltinType::Long:
case BuiltinType::LongLong:
return "Int64";
case BuiltinType::UChar:
return "Uint8";
@@ -2203,6 +2214,7 @@
case BuiltinType::UInt:
return "Uint32";
case BuiltinType::ULong:
case BuiltinType::ULongLong:
return "Uint64";
case BuiltinType::Half:
return "Float16";
@@ -2262,10 +2274,12 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) {
void CXXNameMangler::mangleType(const VectorType *T) {
if ((T->getVectorKind() == VectorType::NeonVector ||
T->getVectorKind() == VectorType::NeonPolyVector)) {
llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
llvm::Triple::ArchType Arch =
getASTContext().getTargetInfo().getTriple().getArch();
if ((Arch == llvm::Triple::aarch64) ||
(Arch == llvm::Triple::aarch64_be))
if (Arch == llvm::Triple::aarch64 ||
Arch == llvm::Triple::aarch64_be ||
(Arch == llvm::Triple::arm64 && !Target.isOSDarwin()))
mangleAArch64NeonVectorType(T);
else
mangleNeonVectorType(T);
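
For illustration, the two schemes mangle the same NEON parameter differently. A sketch (mangled names follow the vendor-extended-type rules; assumes int32x4_t from arm_neon.h):

#include <arm_neon.h>
void f(int32x4_t v);
// mangleNeonVectorType (32-bit ARM, Darwin arm64):    _Z1f17__simd128_int32_t
// mangleAArch64NeonVectorType (AArch64, other arm64): _Z1f11__Int32x4_t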


@@ -3340,6 +3340,10 @@ public:
: DarwinTargetInfo<X86_64TargetInfo>(Triple) {
Int64Type = SignedLongLong;
MaxVectorAlign = 256;
// The 64-bit iOS simulator uses the builtin bool type for Objective-C.
llvm::Triple T = llvm::Triple(Triple);
if (T.getOS() == llvm::Triple::IOS)
UseSignedCharForObjCBool = false;
DescriptionString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128";
}
};
@@ -3602,9 +3606,7 @@ const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
ALL_LANGUAGES },
#define GET_NEON_BUILTINS
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTINS
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
@@ -3924,6 +3926,11 @@ public:
Features["neon"] = true;
Features["hwdiv"] = true;
Features["hwdiv-arm"] = true;
} else if (CPU == "cyclone") {
Features["v8fp"] = true;
Features["neon"] = true;
Features["hwdiv"] = true;
Features["hwdiv-arm"] = true;
} else if (CPU == "cortex-a53" || CPU == "cortex-a57") {
Features["fp-armv8"] = true;
Features["neon"] = true;
@@ -4029,6 +4036,7 @@ public:
.Cases("cortex-a9", "cortex-a12", "cortex-a15", "krait", "7A")
.Cases("cortex-r4", "cortex-r5", "7R")
.Case("swift", "7S")
.Case("cyclone", "8A")
.Cases("cortex-m3", "cortex-m4", "7M")
.Case("cortex-m0", "6M")
.Cases("cortex-a53", "cortex-a57", "8A")
@@ -4320,9 +4328,7 @@ const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
ALL_LANGUAGES },
#define GET_NEON_BUILTINS
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTINS
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
@@ -4378,6 +4384,294 @@ public:
} // end anonymous namespace.
namespace {
class ARM64TargetInfo : public TargetInfo {
static const TargetInfo::GCCRegAlias GCCRegAliases[];
static const char *const GCCRegNames[];
static const Builtin::Info BuiltinInfo[];
std::string ABI;
public:
ARM64TargetInfo(const llvm::Triple &Triple)
: TargetInfo(Triple), ABI("aapcs") {
BigEndian = false;
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = SignedLong;
UIntMaxType = UnsignedLong;
Int64Type = SignedLong;
WCharType = UnsignedInt;
MaxVectorAlign = 128;
RegParmMax = 8;
MaxAtomicInlineWidth = 128;
MaxAtomicPromoteWidth = 128;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad;
if (Triple.isOSBinFormatMachO())
DescriptionString = "e-m:o-i64:64-i128:128-n32:64-S128";
else
DescriptionString = "e-m:e-i64:64-i128:128-n32:64-S128";
// {} in inline assembly are neon specifiers, not assembly variant
// specifiers.
NoAsmVariants = true;
// ARM64 targets default to using the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
virtual const char *getABI() const { return ABI.c_str(); }
virtual bool setABI(const std::string &Name) {
if (Name != "aapcs" && Name != "darwinpcs")
return false;
ABI = Name;
return true;
}
virtual bool setCPU(const std::string &Name) {
bool CPUKnown = llvm::StringSwitch<bool>(Name)
.Case("arm64-generic", true)
.Case("cyclone", true)
.Default(false);
return CPUKnown;
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
Builder.defineMacro("__arm64");
Builder.defineMacro("__arm64__");
Builder.defineMacro("__aarch64__");
Builder.defineMacro("__ARM64_ARCH_8__");
Builder.defineMacro("__AARCH64_SIMD__");
Builder.defineMacro("__ARM_NEON__");
// Target properties.
Builder.defineMacro("_LP64");
Builder.defineMacro("__LP64__");
Builder.defineMacro("__LITTLE_ENDIAN__");
// Subtarget options.
Builder.defineMacro("__REGISTER_PREFIX__", "");
Builder.defineMacro("__aarch64__");
Builder.defineMacro("__AARCH64EL__");
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
Builder.defineMacro("__ARM_ARCH", "8");
Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
Builder.defineMacro("__ARM_64BIT_STATE");
Builder.defineMacro("__ARM_PCS_AAPCS64");
Builder.defineMacro("__ARM_ARCH_ISA_A64");
Builder.defineMacro("__ARM_FEATURE_UNALIGNED");
Builder.defineMacro("__ARM_FEATURE_CLZ");
Builder.defineMacro("__ARM_FEATURE_FMA");
Builder.defineMacro("__ARM_FEATURE_DIV");
Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
// 0xe implies support for half, single and double precision operations.
Builder.defineMacro("__ARM_FP", "0xe");
// PCS specifies this for SysV variants, which is all we support. Other ABIs
// may choose __ARM_FP16_FORMAT_ALTERNATIVE.
Builder.defineMacro("__ARM_FP16_FORMAT_IEEE");
if (Opts.FastMath || Opts.FiniteMathOnly)
Builder.defineMacro("__ARM_FP_FAST");
if ((Opts.C99 || Opts.C11) && !Opts.Freestanding)
Builder.defineMacro("__ARM_FP_FENV_ROUNDING");
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T", Opts.ShortWChar ? "2" : "4");
Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
Opts.ShortEnums ? "1" : "4");
if (BigEndian)
Builder.defineMacro("__ARM_BIG_ENDIAN");
// FIXME: the target should support NEON as an optional extension, like
// the OSS AArch64.
Builder.defineMacro("__ARM_NEON");
// 64-bit NEON supports half, single and double precision operations.
Builder.defineMacro("__ARM_NEON_FP", "7");
// FIXME: the target should support crypto as an optional extension, like
// the OSS AArch64
Builder.defineMacro("__ARM_FEATURE_CRYPTO");
}
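
User code can key off the predefines established above; a minimal sketch:

#if defined(__arm64__) && defined(__ARM_NEON)
#include <arm_neon.h> // NEON is unconditionally advertised on this target (see FIXME above)
#endif
#if __ARM_FP & 0x8
// Bit 0x8 of __ARM_FP indicates double-precision support (0xe = half|single|double).
#endif
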
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
Records = BuiltinInfo;
NumRecords = clang::ARM64::LastTSBuiltin - Builtin::FirstTSBuiltin;
}
virtual bool hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("arm64", true)
.Case("neon", true)
.Default(false);
}
virtual bool isCLZForZeroUndef() const { return false; }
virtual BuiltinVaListKind getBuiltinVaListKind() const {
return TargetInfo::AArch64ABIBuiltinVaList;
}
virtual void getGCCRegNames(const char *const *&Names,
unsigned &NumNames) const;
virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
unsigned &NumAliases) const;
virtual bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
default:
return false;
case 'w': // Floating point and SIMD registers (V0-V31)
Info.setAllowsRegister();
return true;
case 'z': // Zero register, wzr or xzr
Info.setAllowsRegister();
return true;
case 'x': // Floating point and SIMD registers (V0-V15)
Info.setAllowsRegister();
return true;
case 'Q': // A memory address that is a single base register.
Info.setAllowsMemory();
return true;
}
return false;
}
virtual bool validateConstraintModifier(StringRef Constraint,
const char Modifier,
unsigned Size) const {
// Strip off constraint modifiers.
while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
Constraint = Constraint.substr(1);
switch (Constraint[0]) {
default:
return true;
case 'z':
case 'r': {
switch (Modifier) {
case 'x':
case 'w':
// For now assume that the person knows what they're
// doing with the modifier.
return true;
default:
// By default an 'r' constraint will be in the 'x'
// registers.
return (Size == 64);
}
}
}
}
virtual const char *getClobbers() const { return ""; }
int getEHDataRegisterNumber(unsigned RegNo) const {
if (RegNo == 0)
return 0;
if (RegNo == 1)
return 1;
return -1;
}
};
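
A small sketch of these constraints in use; per the modifier logic above, a plain 'r' operand of 64-bit type defaults to the 'x' registers:

long add_asm(long a, long b) {
  long r;
  asm("add %0, %1, %2" : "=r"(r) : "r"(a), "r"(b)); // x-registers for 64-bit operands
  return r;
}
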
const char *const ARM64TargetInfo::GCCRegNames[] = {
// 32-bit Integer registers
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10",
"w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21",
"w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",
// 64-bit Integer registers
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
"x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21",
"x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",
// 32-bit floating point registers
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10",
"s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
"s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
// 64-bit floating point registers
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
"d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
"d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
// Vector registers
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
"v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
"v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
};
void ARM64TargetInfo::getGCCRegNames(const char *const *&Names,
unsigned &NumNames) const {
Names = GCCRegNames;
NumNames = llvm::array_lengthof(GCCRegNames);
}
const TargetInfo::GCCRegAlias ARM64TargetInfo::GCCRegAliases[] = {
{ { "w31" }, "wsp" },
{ { "x29" }, "fp" },
{ { "x30" }, "lr" },
{ { "x31" }, "sp" },
// The S/D/Q and W/X registers overlap, but aren't really aliases; we
// don't want to substitute one of these for a different-sized one.
};
void ARM64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
unsigned &NumAliases) const {
Aliases = GCCRegAliases;
NumAliases = llvm::array_lengthof(GCCRegAliases);
}
const Builtin::Info ARM64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{ #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) \
{ #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#include "clang/Basic/BuiltinsARM64.def"
};
} // end anonymous namespace.
namespace {
class DarwinARM64TargetInfo : public DarwinTargetInfo<ARM64TargetInfo> {
public:
DarwinARM64TargetInfo(const llvm::Triple &Triple)
: DarwinTargetInfo<ARM64TargetInfo>(Triple) {
Int64Type = SignedLongLong;
WCharType = SignedInt;
UseSignedCharForObjCBool = false;
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
TheCXXABI.set(TargetCXXABI::iOS64);
}
virtual BuiltinVaListKind getBuiltinVaListKind() const {
return TargetInfo::CharPtrBuiltinVaList;
}
};
} // end anonymous namespace
namespace {
// Hexagon abstract base class
class HexagonTargetInfo : public TargetInfo {
@@ -5697,6 +5991,17 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
default:
return NULL;
case llvm::Triple::arm64:
if (Triple.isOSDarwin())
return new DarwinARM64TargetInfo(Triple);
switch (os) {
case llvm::Triple::Linux:
return new LinuxTargetInfo<ARM64TargetInfo>(Triple);
default:
return new ARM64TargetInfo(Triple);
}
case llvm::Triple::xcore:
return new XCoreTargetInfo(Triple);

File diff suppressed because it is too large.


@@ -843,7 +843,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// that the cleanup code should not destroy the variable.
if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
Builder.CreateStore(Builder.getTrue(), NRVOFlag);
} else if (!ReturnValue) {
} else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
if (RV)
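
The widened condition covers returning an expression of void type from a void function, e.g.:

void g(void);
void f(void) {
  return g(); // RV is non-null but of void type: emit g() only for its side effects
}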


@@ -2200,6 +2200,20 @@ public:
bool negateForRightShift);
llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
llvm::Type *Ty, bool usgn, const char *name);
llvm::Value *EmitConcatVectors(llvm::Value *Lo, llvm::Value *Hi,
llvm::Type *ArgTy);
llvm::Value *EmitExtractHigh(llvm::Value *In, llvm::Type *ResTy);
// Helper functions for EmitARM64BuiltinExpr.
llvm::Value *vectorWrapScalar8(llvm::Value *Op);
llvm::Value *vectorWrapScalar16(llvm::Value *Op);
llvm::Value *emitVectorWrappedScalar8Intrinsic(
unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
llvm::Value *emitVectorWrappedScalar16Intrinsic(
unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
llvm::Value *EmitARM64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitNeon64Call(llvm::Function *F,
llvm::SmallVectorImpl<llvm::Value *> &O,
const char *name);
llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);


@@ -60,6 +60,7 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
case TargetCXXABI::GenericItanium:
return CreateItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:


@@ -244,6 +244,11 @@ public:
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
CharUnits cookieSize) override;
};
class iOS64CXXABI : public ARMCXXABI {
public:
iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
};
}
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
@@ -254,6 +259,9 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::iOS:
return new ARMCXXABI(CGM);
case TargetCXXABI::iOS64:
return new iOS64CXXABI(CGM);
// Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
// include the other 32-bit ARM oddities: constructor/destructor return values
// and array cookies.
@@ -1415,6 +1423,13 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// __cxa_guard_release (&obj_guard);
// }
// }
// ARM64 C++ ABI 3.2.2:
// This ABI instead only specifies the value bit 0 of the static guard
// variable; all other bits are platform defined. Bit 0 shall be 0 when the
// variable is not initialized and 1 when it is.
// FIXME: Reading one bit is no more efficient than reading one byte so
// the codegen is the same as for the generic Itanium ABI.
} else {
// Load the first byte of the guard variable.
llvm::LoadInst *LI =
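
Concretely, the guard in question protects a function-local static; a sketch (compute stands in for any dynamic initializer):

int compute();
int &instance() {
  static int x = compute(); // initialization guarded as described above
  return x;
}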


@@ -3134,6 +3134,569 @@ PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}
//===----------------------------------------------------------------------===//
// ARM64 ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class ARM64ABIInfo : public ABIInfo {
public:
enum ABIKind {
AAPCS = 0,
DarwinPCS
};
private:
ABIKind Kind;
public:
ARM64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
private:
ABIKind getABIKind() const { return Kind; }
bool isDarwinPCS() const { return Kind == DarwinPCS; }
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &AllocatedVFP,
bool &IsHA, unsigned &AllocatedGPR,
bool &IsSmallAggr) const;
bool isIllegalVectorType(QualType Ty) const;
virtual void computeInfo(CGFunctionInfo &FI) const {
// To correctly handle Homogeneous Aggregates, we need to keep track of the
// number of SIMD and Floating-point registers allocated so far.
// If the argument is an HFA or an HVA and there are sufficient unallocated
// SIMD and Floating-point registers, then the argument is allocated to SIMD
// and Floating-point Registers (with one register per member of the HFA or
// HVA). Otherwise, the NSRN is set to 8.
unsigned AllocatedVFP = 0;
// To correctly handle small aggregates, we need to keep track of the number
// of GPRs allocated so far. If the small aggregate can't all fit into
// registers, it will be passed on the stack. We don't allow the aggregate to be
// partially in registers.
unsigned AllocatedGPR = 0;
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
unsigned PreAllocation = AllocatedVFP, PreGPR = AllocatedGPR;
bool IsHA = false, IsSmallAggr = false;
const unsigned NumVFPs = 8;
const unsigned NumGPRs = 8;
it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
AllocatedGPR, IsSmallAggr);
// If we do not have enough VFP registers for the HA, any VFP registers
// that are unallocated are marked as unavailable. To achieve this, we add
// padding of (NumVFPs - PreAllocation) floats.
if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
llvm::Type *PaddingTy = llvm::ArrayType::get(
llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
if (isDarwinPCS())
it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
else {
// Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
// as sequences of floats since they'll get "holes" inserted as
// padding by the back end.
uint32_t NumStackSlots = getContext().getTypeSize(it->type);
NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
llvm::Type *CoerceTy = llvm::ArrayType::get(
llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
it->info = ABIArgInfo::getDirect(CoerceTy, 0, PaddingTy);
}
}
// If we do not have enough GPRs for the small aggregate, any GPR regs
// that are unallocated are marked as unavailable.
if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
llvm::Type *PaddingTy = llvm::ArrayType::get(
llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreGPR);
it->info =
ABIArgInfo::getDirect(it->info.getCoerceToType(), 0, PaddingTy);
}
}
}
llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
}
};
class ARM64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
ARM64TargetCodeGenInfo(CodeGenTypes &CGT, ARM64ABIInfo::ABIKind Kind)
: TargetCodeGenInfo(new ARM64ABIInfo(CGT, Kind)) {}
StringRef getARCRetainAutoreleasedReturnValueMarker() const {
return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; }
virtual bool doesReturnSlotInterfereWithArgs() const { return false; }
};
}
static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
ASTContext &Context,
uint64_t *HAMembers = 0);
ABIArgInfo ARM64ABIInfo::classifyArgumentType(QualType Ty,
unsigned &AllocatedVFP,
bool &IsHA,
unsigned &AllocatedGPR,
bool &IsSmallAggr) const {
// Handle illegal vector types here.
if (isIllegalVectorType(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 32) {
llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
AllocatedGPR++;
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
llvm::Type *ResType =
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
AllocatedVFP++;
return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
llvm::Type *ResType =
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
AllocatedVFP++;
return ABIArgInfo::getDirect(ResType);
}
AllocatedGPR++;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
if (Ty->isVectorType())
// Size of a legal vector should be either 64 or 128.
AllocatedVFP++;
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::Half ||
BT->getKind() == BuiltinType::Float ||
BT->getKind() == BuiltinType::Double ||
BT->getKind() == BuiltinType::LongDouble)
AllocatedVFP++;
}
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
if (!Ty->isFloatingType() && !Ty->isVectorType()) {
int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
AllocatedGPR += RegsNeeded;
}
return (Ty->isPromotableIntegerType() && isDarwinPCS()
? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
}
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (isRecordReturnIndirect(Ty, getCXXABI())) {
AllocatedGPR++;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
// Empty records are always ignored on Darwin, but actually passed in C++ mode
// elsewhere for GNU compatibility.
if (isEmptyRecord(getContext(), Ty, true)) {
if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
return ABIArgInfo::getIgnore();
++AllocatedGPR;
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
// Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
const Type *Base = 0;
uint64_t Members = 0;
if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
AllocatedVFP += Members;
IsHA = true;
return ABIArgInfo::getExpand();
}
// Aggregates <= 16 bytes are passed directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 128) {
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
AllocatedGPR += Size / 64;
IsSmallAggr = true;
// We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
// For aggregates with 16-byte alignment, we use i128.
if (getContext().getTypeAlign(Ty) < 128 && Size == 128) {
llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
}
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
AllocatedGPR++;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
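
A few example parameter types under these rules (a sketch; the typedef names are illustrative):

typedef struct { float x, y, z, w; } Vec4; // HFA of four floats: expanded, one per s-register
typedef struct { long a, b; } Pair;        // 16 bytes, 8-byte aligned: a pair of x-registers
typedef struct { long a, b, c; } Big;      // 24 bytes and not an HFA: passed indirectly
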
ABIArgInfo ARM64ABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
return ABIArgInfo::getIndirect(0);
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
}
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (isRecordReturnIndirect(RetTy, getCXXABI()))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
const Type *Base = 0;
if (isHomogeneousAggregate(RetTy, Base, getContext()))
// Homogeneous Floating-point Aggregates (HFAs) are returned directly.
return ABIArgInfo::getDirect();
// Aggregates <= 16 bytes are returned directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 128) {
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
return ABIArgInfo::getIndirect(0);
}
/// isIllegalVectorType - check whether the vector type is illegal for ARM64.
bool ARM64ABIInfo::isIllegalVectorType(QualType Ty) const {
if (const VectorType *VT = Ty->getAs<VectorType>()) {
// Check whether VT is legal.
unsigned NumElements = VT->getNumElements();
uint64_t Size = getContext().getTypeSize(VT);
// NumElements should be a power of 2 between 1 and 16.
if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
return true;
return Size != 64 && (Size != 128 || NumElements == 1);
}
return false;
}
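
For example, under this predicate (a sketch using the vector extensions):

typedef int v2i32 __attribute__((vector_size(8)));     // 64 bits: legal
typedef int v4i32 __attribute__((vector_size(16)));    // 128 bits: legal
typedef int v8i32 __attribute__((vector_size(32)));    // 256 bits: illegal, passed indirectly
typedef int v3i32 __attribute__((ext_vector_type(3))); // 3 elements: not a power of 2, illegal
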
static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
int AllocatedGPR, int AllocatedVFP,
bool IsIndirect, CodeGenFunction &CGF) {
// The AArch64 va_list type and handling is specified in the Procedure Call
// Standard, section B.4:
//
// struct {
// void *__stack;
// void *__gr_top;
// void *__vr_top;
// int __gr_offs;
// int __vr_offs;
// };
llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
auto &Ctx = CGF.getContext();
llvm::Value *reg_offs_p = 0, *reg_offs = 0;
int reg_top_index;
int RegSize;
if (AllocatedGPR) {
assert(!AllocatedVFP && "Arguments never split between int & VFP regs");
// 3 is the field number of __gr_offs
reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
RegSize = 8 * AllocatedGPR;
} else {
assert(!AllocatedGPR && "Argument must go in VFP or int regs");
// 4 is the field number of __vr_offs.
reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
RegSize = 16 * AllocatedVFP;
}
//=======================================
// Find out where argument was passed
//=======================================
// If reg_offs >= 0 we're already using the stack for this type of
// argument. We don't want to keep updating reg_offs (in case it overflows,
// though anyone passing 2GB of arguments, each at most 16 bytes, deserves
// whatever they get).
llvm::Value *UsingStack = 0;
UsingStack = CGF.Builder.CreateICmpSGE(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
// Otherwise, at least some kind of argument could go in these registers, the
// question is whether this particular type is too big.
CGF.EmitBlock(MaybeRegBlock);
// Integer arguments may need correct register alignment (for example a
// "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
// align __gr_offs to calculate the potential address.
if (AllocatedGPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
int Align = Ctx.getTypeAlign(Ty) / 8;
reg_offs = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
"align_regoffs");
reg_offs = CGF.Builder.CreateAnd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
"aligned_regoffs");
}
// Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
llvm::Value *NewOffset = 0;
NewOffset = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
CGF.Builder.CreateStore(NewOffset, reg_offs_p);
// Now we're in a position to decide whether this argument really was in
// registers or not.
llvm::Value *InRegs = 0;
InRegs = CGF.Builder.CreateICmpSLE(
NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
//=======================================
// Argument was in registers
//=======================================
// Now we emit the code for if the argument was originally passed in
// registers. First start the appropriate block:
CGF.EmitBlock(InRegBlock);
llvm::Value *reg_top_p = 0, *reg_top = 0;
reg_top_p =
CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
llvm::Value *RegAddr = 0;
llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
if (IsIndirect) {
// If it's been passed indirectly (actually a struct), whatever we find from
// stored registers or on the stack will actually be a struct **.
MemTy = llvm::PointerType::getUnqual(MemTy);
}
const Type *Base = 0;
uint64_t NumMembers;
if (isHomogeneousAggregate(Ty, Base, Ctx, &NumMembers) && NumMembers > 1) {
// Homogeneous aggregates passed in registers will have their elements split
// and stored 16 bytes apart regardless of size (they're notionally in qN,
// qN+1, ...). We reload and store into a temporary local variable
// contiguously.
assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
int Offset = 0;
if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
Offset = 16 - Ctx.getTypeSize(Base) / 8;
for (unsigned i = 0; i < NumMembers; ++i) {
llvm::Value *BaseOffset =
llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
LoadAddr = CGF.Builder.CreateBitCast(
LoadAddr, llvm::PointerType::getUnqual(BaseTy));
llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
CGF.Builder.CreateStore(Elem, StoreAddr);
}
RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
} else {
// Otherwise the object is contiguous in memory
unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
BaseAddr = CGF.Builder.CreateAdd(
BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
}
RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
}
CGF.EmitBranch(ContBlock);
//=======================================
// Argument was on the stack
//=======================================
CGF.EmitBlock(OnStackBlock);
llvm::Value *stack_p = 0, *OnStackAddr = 0;
stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
// Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
int Align = Ctx.getTypeAlign(Ty) / 8;
OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
OnStackAddr = CGF.Builder.CreateAdd(
OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
"align_stack");
OnStackAddr = CGF.Builder.CreateAnd(
OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
"align_stack");
OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
}
uint64_t StackSize;
if (IsIndirect)
StackSize = 8;
else
StackSize = Ctx.getTypeSize(Ty) / 8;
// All stack slots are 8 bytes
StackSize = llvm::RoundUpToAlignment(StackSize, 8);
llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
llvm::Value *NewStack =
CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
// Write the new value of __stack for the next call to va_arg
CGF.Builder.CreateStore(NewStack, stack_p);
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
Ctx.getTypeSize(Ty) < 64) {
int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
OnStackAddr = CGF.Builder.CreateAdd(
OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
}
OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
CGF.EmitBranch(ContBlock);
//=======================================
// Tidy up
//=======================================
CGF.EmitBlock(ContBlock);
llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(OnStackAddr, OnStackBlock);
if (IsIndirect)
return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
return ResAddr;
}
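
Putting the two register files together: a variadic callee advances __gr_offs and __vr_offs independently, one slot size per register class; a sketch:

#include <stdarg.h>
double sum(int n, ...) {
  va_list ap;
  va_start(ap, n);
  long l = va_arg(ap, long);     // consults __gr_offs / __gr_top (8-byte GPR slots)
  double d = va_arg(ap, double); // consults __vr_offs / __vr_top (16-byte SIMD slots)
  va_end(ap);
  return d + (double)l;
}
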
llvm::Value *ARM64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
unsigned AllocatedGPR = 0, AllocatedVFP = 0;
bool IsHA = false, IsSmallAggr = false;
ABIArgInfo AI =
classifyArgumentType(Ty, AllocatedVFP, IsHA, AllocatedGPR, IsSmallAggr);
return EmitAArch64VAArg(VAListAddr, Ty, AllocatedGPR, AllocatedVFP,
AI.isIndirect(), CGF);
}
llvm::Value *ARM64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// We do not support va_arg for aggregates or illegal vector types.
// Lower VAArg here for these cases and use the LLVM va_arg instruction for
// other cases.
if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
return 0;
uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
const Type *Base = 0;
bool isHA = isHomogeneousAggregate(Ty, Base, getContext());
bool isIndirect = false;
// Arguments bigger than 16 bytes which aren't homogeneous aggregates should
// be passed indirectly.
if (Size > 16 && !isHA) {
isIndirect = true;
Size = 8;
Align = 8;
}
llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
if (isEmptyRecord(getContext(), Ty, true)) {
// These are ignored for parameter passing purposes.
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
return Builder.CreateBitCast(Addr, PTy);
}
const uint64_t MinABIAlign = 8;
if (Align > MinABIAlign) {
llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
Addr = Builder.CreateGEP(Addr, Offset);
llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
}
uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
llvm::Value *NextAddr = Builder.CreateGEP(
Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
if (isIndirect)
Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
return AddrTyped;
}
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//
@@ -3372,8 +3935,7 @@ void ARMABIInfo::setRuntimeCC() {
/// contained in the type is returned through it; this is used for the
/// recursive calls that check aggregate component types.
static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
ASTContext &Context,
uint64_t *HAMembers = 0) {
ASTContext &Context, uint64_t *HAMembers) {
uint64_t Members = 0;
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
@@ -4168,237 +4730,12 @@ ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// The AArch64 va_list type and handling is specified in the Procedure Call
// Standard, section B.4:
//
// struct {
// void *__stack;
// void *__gr_top;
// void *__vr_top;
// int __gr_offs;
// int __vr_offs;
// };
int FreeIntRegs = 8, FreeVFPRegs = 8;
Ty = CGF.getContext().getCanonicalType(Ty);
ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
llvm::Value *reg_offs_p = 0, *reg_offs = 0;
int reg_top_index;
int RegSize;
if (FreeIntRegs < 8) {
assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs");
// 3 is the field number of __gr_offs
reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
RegSize = 8 * (8 - FreeIntRegs);
} else {
assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs");
// 4 is the field number of __vr_offs.
reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
RegSize = 16 * (8 - FreeVFPRegs);
}
//=======================================
// Find out where argument was passed
//=======================================
// If reg_offs >= 0 we're already using the stack for this type of
// argument. We don't want to keep updating reg_offs (in case it overflows,
// though anyone passing 2GB of arguments, each at most 16 bytes, deserves
// whatever they get).
llvm::Value *UsingStack = 0;
UsingStack = CGF.Builder.CreateICmpSGE(reg_offs,
llvm::ConstantInt::get(CGF.Int32Ty, 0));
CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
// Otherwise, at least some kind of argument could go in these registers, the
// question is whether this particular type is too big.
CGF.EmitBlock(MaybeRegBlock);
// Integer arguments may need correct register alignment (for example a
// "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
// align __gr_offs to calculate the potential address.
if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
int Align = getContext().getTypeAlign(Ty) / 8;
reg_offs = CGF.Builder.CreateAdd(reg_offs,
llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
"align_regoffs");
reg_offs = CGF.Builder.CreateAnd(reg_offs,
llvm::ConstantInt::get(CGF.Int32Ty, -Align),
"aligned_regoffs");
}
// Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
llvm::Value *NewOffset = 0;
NewOffset = CGF.Builder.CreateAdd(reg_offs,
llvm::ConstantInt::get(CGF.Int32Ty, RegSize),
"new_reg_offs");
CGF.Builder.CreateStore(NewOffset, reg_offs_p);
// Now we're in a position to decide whether this argument really was in
// registers or not.
llvm::Value *InRegs = 0;
InRegs = CGF.Builder.CreateICmpSLE(NewOffset,
llvm::ConstantInt::get(CGF.Int32Ty, 0),
"inreg");
CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
//=======================================
// Argument was in registers
//=======================================
// Now we emit the code for if the argument was originally passed in
// registers. First start the appropriate block:
CGF.EmitBlock(InRegBlock);
llvm::Value *reg_top_p = 0, *reg_top = 0;
reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
llvm::Value *RegAddr = 0;
llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
if (!AI.isDirect()) {
// If it's been passed indirectly (actually a struct), whatever we find from
// stored registers or on the stack will actually be a struct **.
MemTy = llvm::PointerType::getUnqual(MemTy);
}
const Type *Base = 0;
uint64_t NumMembers;
if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)
&& NumMembers > 1) {
// Homogeneous aggregates passed in registers will have their elements split
// and stored 16 bytes apart regardless of size (they're notionally in qN,
// qN+1, ...). We reload and store into a temporary local variable
// contiguously.
assert(AI.isDirect() && "Homogeneous aggregates should be passed directly");
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
int Offset = 0;
if (CGF.CGM.getDataLayout().isBigEndian() &&
getContext().getTypeSize(Base) < 128)
Offset = 16 - getContext().getTypeSize(Base)/8;
for (unsigned i = 0; i < NumMembers; ++i) {
llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty,
16 * i + Offset);
llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
LoadAddr = CGF.Builder.CreateBitCast(LoadAddr,
llvm::PointerType::getUnqual(BaseTy));
llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
CGF.Builder.CreateStore(Elem, StoreAddr);
}
RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
} else {
// Otherwise the object is contiguous in memory
unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
getContext().getTypeSize(Ty) < (BeAlign * 8)) {
int Offset = BeAlign - getContext().getTypeSize(Ty)/8;
BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
BaseAddr = CGF.Builder.CreateAdd(BaseAddr,
llvm::ConstantInt::get(CGF.Int64Ty,
Offset),
"align_be");
BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
}
RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
}
CGF.EmitBranch(ContBlock);
//=======================================
// Argument was on the stack
//=======================================
CGF.EmitBlock(OnStackBlock);
llvm::Value *stack_p = 0, *OnStackAddr = 0;
stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
// Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
int Align = getContext().getTypeAlign(Ty) / 8;
OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr,
llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
"align_stack");
OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr,
llvm::ConstantInt::get(CGF.Int64Ty, -Align),
"align_stack");
OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
}
uint64_t StackSize;
if (AI.isDirect())
StackSize = getContext().getTypeSize(Ty) / 8;
else
StackSize = 8;
// All stack slots are 8 bytes
StackSize = llvm::RoundUpToAlignment(StackSize, 8);
llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC,
"new_stack");
// Write the new value of __stack for the next call to va_arg
CGF.Builder.CreateStore(NewStack, stack_p);
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
getContext().getTypeSize(Ty) < 64 ) {
int Offset = 8 - getContext().getTypeSize(Ty)/8;
OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr,
llvm::ConstantInt::get(CGF.Int64Ty,
Offset),
"align_be");
OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
}
OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
CGF.EmitBranch(ContBlock);
//=======================================
// Tidy up
//=======================================
CGF.EmitBlock(ContBlock);
llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(OnStackAddr, OnStackBlock);
if (AI.isDirect())
return ResAddr;
return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
return EmitAArch64VAArg(VAListAddr, Ty, 8 - FreeIntRegs, 8 - FreeVFPRegs,
AI.isIndirect(), CGF);
}
//===----------------------------------------------------------------------===//
@@ -5801,6 +6138,14 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::mips64el:
return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
case llvm::Triple::arm64: {
ARM64ABIInfo::ABIKind Kind = ARM64ABIInfo::AAPCS;
if (strcmp(getTarget().getABI(), "darwinpcs") == 0)
Kind = ARM64ABIInfo::DarwinPCS;
return *(TheTargetCodeGenInfo = new ARM64TargetCodeGenInfo(Types, Kind));
}
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));


@@ -399,7 +399,8 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
// If we are compiling as iOS / simulator, don't attempt to link libgcc_s.1,
// it never went into the SDK.
// Linking against libgcc_s.1 isn't needed for iOS 5.0+
if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator())
if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator() &&
getTriple().getArch() != llvm::Triple::arm64)
CmdArgs.push_back("-lgcc_s.1");
// We currently always need a static runtime library for iOS.
@@ -498,7 +499,8 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
// go ahead and assume we're targeting iOS.
StringRef MachOArchName = getMachOArchName(Args);
if (OSXTarget.empty() && iOSTarget.empty() &&
(MachOArchName == "armv7" || MachOArchName == "armv7s"))
(MachOArchName == "armv7" || MachOArchName == "armv7s" ||
MachOArchName == "arm64"))
iOSTarget = iOSVersionMin;
// Handle conflicting deployment targets
@@ -517,6 +519,7 @@
// default platform.
if (!OSXTarget.empty() && !iOSTarget.empty()) {
if (getTriple().getArch() == llvm::Triple::arm ||
getTriple().getArch() == llvm::Triple::arm64 ||
getTriple().getArch() == llvm::Triple::thumb)
OSXTarget = "";
else
@@ -652,6 +655,7 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
// Use the newer cc_kext for iOS ARM after 6.0.
if (!isTargetIPhoneOS() || isTargetIOSSimulator() ||
getTriple().getArch() == llvm::Triple::arm64 ||
!isIPhoneOSVersionLT(6, 0)) {
llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
} else {
@@ -879,6 +883,10 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
else if (Name == "armv7s")
DAL->AddJoinedArg(0, MArch, "armv7s");
else if (Name == "arm64")
DAL->AddJoinedArg(0, MArch, "arm64");
else if (Name == "armv8")
DAL->AddJoinedArg(0, MArch, "arm64");
}
return DAL;
@@ -919,7 +927,8 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
// FIXME: It would be far better to avoid inserting those -static arguments,
// but we can't check the deployment target in the translation code until
// it is set here.
if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0)) {
if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0) &&
getTriple().getArch() != llvm::Triple::arm64) {
for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
Arg *A = *it;
++it;
@@ -984,7 +993,8 @@ bool MachO::isPIEDefault() const {
}
bool MachO::isPICDefaultForced() const {
return getArch() == llvm::Triple::x86_64;
return (getArch() == llvm::Triple::x86_64 ||
getArch() == llvm::Triple::arm64);
}
bool MachO::SupportsProfiling() const {
@@ -1073,7 +1083,9 @@ void Darwin::addStartObjectFileArgs(const llvm::opt::ArgList &Args,
if (isTargetIOSSimulator()) {
; // iOS simulator does not need crt1.o.
} else if (isTargetIPhoneOS()) {
if (isIPhoneOSVersionLT(3, 1))
if (getArch() == llvm::Triple::arm64)
; // iOS does not need any crt1 files for arm64
else if (isIPhoneOSVersionLT(3, 1))
CmdArgs.push_back("-lcrt1.o");
else if (isIPhoneOSVersionLT(6, 0))
CmdArgs.push_back("-lcrt1.3.1.o");
@@ -1386,6 +1398,7 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
};
switch (TargetTriple.getArch()) {
case llvm::Triple::arm64:
case llvm::Triple::aarch64:
LibDirs.append(AArch64LibDirs,
AArch64LibDirs + llvm::array_lengthof(AArch64LibDirs));
@@ -2090,6 +2103,7 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
getTriple().getArch() == llvm::Triple::arm64 ||
getTriple().getArch() == llvm::Triple::arm ||
getTriple().getArch() == llvm::Triple::armeb ||
getTriple().getArch() == llvm::Triple::thumb ||
@@ -2099,12 +2113,13 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
bool UseInitArrayDefault =
bool UseInitArrayDefault =
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
(getTriple().getOS() == llvm::Triple::Linux && (
!V.isOlderThan(4, 7, 0) ||
getTriple().getEnvironment() == llvm::Triple::Android));
getTriple().getArch() == llvm::Triple::arm64 ||
(getTriple().getOS() == llvm::Triple::Linux &&
(!V.isOlderThan(4, 7, 0) ||
getTriple().getEnvironment() == llvm::Triple::Android));
if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array,
@ -2840,6 +2855,7 @@ static std::string getMultiarchTriple(const llvm::Triple &TargetTriple,
if (llvm::sys::fs::exists(SysRoot + "/lib/x86_64-linux-gnu"))
return "x86_64-linux-gnu";
return TargetTriple.str();
case llvm::Triple::arm64:
case llvm::Triple::aarch64:
if (llvm::sys::fs::exists(SysRoot + "/lib/aarch64-linux-gnu"))
return "aarch64-linux-gnu";
@ -3216,8 +3232,9 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::x86) {
MultiarchIncludeDirs = X86MultiarchIncludeDirs;
} else if ((getTriple().getArch() == llvm::Triple::aarch64) ||
(getTriple().getArch() == llvm::Triple::aarch64_be)) {
} else if (getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
getTriple().getArch() == llvm::Triple::arm64) {
MultiarchIncludeDirs = AArch64MultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::arm) {
if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
View File
@ -368,7 +368,8 @@ public:
llvm::opt::ArgStringList &CmdArgs) const override;
bool isKernelStatic() const override {
return !isTargetIPhoneOS() || isIPhoneOSVersionLT(6, 0);
return !isTargetIPhoneOS() || isIPhoneOSVersionLT(6, 0) ||
getTriple().getArch() == llvm::Triple::arm64;
}
protected:
View File
@ -471,6 +471,7 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
case llvm::Triple::arm64:
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::ppc:
@ -846,6 +847,59 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
}
}
/// getARM64TargetCPU - Get the (LLVM) name of the ARM64 cpu we are targeting.
static std::string getARM64TargetCPU(const ArgList &Args) {
// If we have -mcpu=, use that.
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
StringRef MCPU = A->getValue();
// Handle -mcpu=native.
if (MCPU == "native")
return llvm::sys::getHostCPUName();
else
return MCPU;
}
// At some point, we may need to check -march here, but for now we only
// support one arm64 architecture.
// Default to "cyclone" CPU.
return "cyclone";
}
void Clang::AddARM64TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
llvm::Triple Triple(TripleStr);
if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
Args.hasArg(options::OPT_mkernel) ||
Args.hasArg(options::OPT_fapple_kext))
CmdArgs.push_back("-disable-red-zone");
if (!Args.hasFlag(options::OPT_mimplicit_float,
options::OPT_mno_implicit_float, true))
CmdArgs.push_back("-no-implicit-float");
const char *ABIName = 0;
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
ABIName = A->getValue();
else if (Triple.isOSDarwin())
ABIName = "darwinpcs";
else
ABIName = "aapcs";
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
CmdArgs.push_back("-target-cpu");
CmdArgs.push_back(Args.MakeArgString(getARM64TargetCPU(Args)));
if (Args.hasArg(options::OPT_mstrict_align)) {
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-arm64-strict-align");
}
}
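// Illustration (hypothetical command line, not part of this patch): given the
// logic above, an invocation such as
//   clang -target arm64-apple-darwin -mkernel -mstrict-align foo.c
// would be expected to reach cc1 as "-target-abi darwinpcs",
// "-target-cpu cyclone", "-disable-red-zone" and
// "-backend-option -arm64-strict-align".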
// Get CPU and ABI names. They are not independent
// so we have to calculate them together.
static void getMipsCPUAndABI(const ArgList &Args,
@ -2325,8 +2379,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Note that these flags are trump-cards. Regardless of the order w.r.t. the
// PIC or PIE options above, if these show up, PIC is disabled.
llvm::Triple Triple(TripleStr);
if (KernelOrKext &&
(!Triple.isiOS() || Triple.isOSVersionLT(6)))
if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6) ||
Triple.getArch() == llvm::Triple::arm64))
PIC = PIE = false;
if (Args.hasArg(options::OPT_static))
PIC = PIE = false;
@ -2649,6 +2703,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AddARMTargetArgs(Args, CmdArgs, KernelOrKext);
break;
case llvm::Triple::arm64:
AddARM64TargetArgs(Args, CmdArgs);
break;
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@ -4876,6 +4934,7 @@ const char *arm::getLLVMArchSuffixForARM(StringRef CPU) {
.Case("cortex-m3", "v7m")
.Case("cortex-m4", "v7em")
.Case("swift", "v7s")
.Case("cyclone", "v8")
.Cases("cortex-a53", "cortex-a57", "v8")
.Default("");
}
@ -4910,6 +4969,7 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
.Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
.Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
.Cases("armv7s", "xscale", llvm::Triple::arm)
.Case("arm64", llvm::Triple::arm64)
.Case("r600", llvm::Triple::r600)
.Case("nvptx", llvm::Triple::nvptx)
.Case("nvptx64", llvm::Triple::nvptx64)
@ -6711,7 +6771,8 @@ static StringRef getLinuxDynamicLinker(const ArgList &Args,
} else if (ToolChain.getArch() == llvm::Triple::x86 ||
ToolChain.getArch() == llvm::Triple::sparc)
return "/lib/ld-linux.so.2";
else if (ToolChain.getArch() == llvm::Triple::aarch64)
else if (ToolChain.getArch() == llvm::Triple::aarch64 ||
ToolChain.getArch() == llvm::Triple::arm64)
return "/lib/ld-linux-aarch64.so.1";
else if (ToolChain.getArch() == llvm::Triple::aarch64_be)
return "/lib/ld-linux-aarch64_be.so.1";
@ -6811,7 +6872,8 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-m");
if (ToolChain.getArch() == llvm::Triple::x86)
CmdArgs.push_back("elf_i386");
else if (ToolChain.getArch() == llvm::Triple::aarch64)
else if (ToolChain.getArch() == llvm::Triple::aarch64 ||
ToolChain.getArch() == llvm::Triple::arm64)
CmdArgs.push_back("aarch64linux");
else if (ToolChain.getArch() == llvm::Triple::aarch64_be)
CmdArgs.push_back("aarch64_be_linux");
View File
@ -54,6 +54,8 @@ using llvm::opt::ArgStringList;
void AddARMTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
bool KernelOrKext) const;
void AddARM64TargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddR600TargetArgs(const llvm::opt::ArgList &Args,
View File
@ -378,6 +378,11 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
"arm-apple-darwin10", "v6", "", triple);
break;
case llvm::Triple::arm64:
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
"arm64-apple-darwin10", "", "", triple);
break;
}
return;
}
View File
@ -9,7 +9,7 @@
CLANG_LEVEL := ../..
BUILT_SOURCES = arm_neon.h.inc
BUILT_SOURCES = arm_neon.h.inc aarch64_simd.h.inc
TABLEGEN_INC_FILES_COMMON = 1
include $(CLANG_LEVEL)/Makefile
@ -24,11 +24,11 @@ HEADERS := $(notdir $(wildcard $(PROJ_SRC_DIR)/*.h))
OBJHEADERS := $(addprefix $(HeaderDir)/, $(HEADERS))
$(OBJHEADERS): $(HeaderDir)/%.h: $(PROJ_SRC_DIR)/%.h $(HeaderDir)/.dir $(HeaderDir)/arm_neon.h
$(OBJHEADERS): $(HeaderDir)/%.h: $(PROJ_SRC_DIR)/%.h $(HeaderDir)/.dir $(HeaderDir)/arm_neon.h $(HeaderDir)/aarch64_simd.h
$(Verb) cp $< $@
$(Echo) Copying $(notdir $<) to build dir
$(HeaderDir)/arm_neon.h: $(BUILT_SOURCES) $(HeaderDir)/.dir
$(HeaderDir)/arm_neon.h: $(HeaderDir)/%: %.inc $(HeaderDir)/.dir
$(Verb) cp $< $@
$(Echo) Copying $(notdir $<) to build dir
@ -61,4 +61,5 @@ install-local:: $(INSTHEADERS) $(PROJ_headers)/module.map
$(ObjDir)/arm_neon.h.inc.tmp : $(CLANG_LEVEL)/include/clang/Basic/arm_neon.td $(CLANG_TBLGEN) $(ObjDir)/.dir
$(Echo) "Building Clang arm_neon.h.inc with tblgen"
$(Verb) $(ClangTableGen) -gen-arm-neon -o $(call SYSPATH, $@) $<
$(Verb) $(ClangTableGen) -gen-arm-neon -o $(call SYSPATH, $@) \
-I $(PROJ_SRC_DIR)/../../include $<
View File
@ -309,6 +309,10 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
return ExprError();
break;
case llvm::Triple::arm64:
if (CheckARM64BuiltinFunctionCall(BuiltinID, TheCall))
return ExprError();
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
@ -369,7 +373,7 @@ static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
bool IsAArch64) {
bool IsPolyUnsigned, bool IsInt64Long) {
switch (Flags.getEltType()) {
case NeonTypeFlags::Int8:
return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
@ -378,15 +382,15 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
case NeonTypeFlags::Int32:
return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
case NeonTypeFlags::Int64:
if (IsAArch64)
if (IsInt64Long)
return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
else
return Flags.isUnsigned() ? Context.UnsignedLongLongTy
: Context.LongLongTy;
case NeonTypeFlags::Poly8:
return IsAArch64 ? Context.UnsignedCharTy : Context.SignedCharTy;
return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
case NeonTypeFlags::Poly16:
return IsAArch64 ? Context.UnsignedShortTy : Context.ShortTy;
return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
case NeonTypeFlags::Poly64:
return Context.UnsignedLongTy;
case NeonTypeFlags::Poly128:
@ -434,9 +438,13 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
QualType RHSTy = RHS.get()->getType();
bool IsAArch64 =
Context.getTargetInfo().getTriple().getArch() == llvm::Triple::aarch64;
QualType EltTy = getNeonEltType(NeonTypeFlags(TV), Context, IsAArch64);
llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
bool IsPolyUnsigned =
Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::arm64;
bool IsInt64Long =
Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
QualType EltTy =
getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
if (HasConstPtr)
EltTy = EltTy.withConst();
QualType LHSTy = Context.getPointerType(EltTy);
@ -487,11 +495,15 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
return false;
}
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall) {
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth) {
assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_strex) &&
BuiltinID == ARM::BI__builtin_arm_strex ||
BuiltinID == ARM64::BI__builtin_arm_ldrex ||
BuiltinID == ARM64::BI__builtin_arm_strex) &&
"unexpected ARM builtin");
bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex;
bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM64::BI__builtin_arm_ldrex;
DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
@ -552,7 +564,8 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall) {
}
// But ARM doesn't have instructions to deal with 128-bit versions.
if (Context.getTypeSize(ValType) > 64) {
if (Context.getTypeSize(ValType) > MaxWidth) {
assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
<< PointerArg->getType() << PointerArg->getSourceRange();
return true;
@ -598,7 +611,7 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_strex) {
return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall);
return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
}
if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
@ -636,6 +649,21 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return false;
}
bool Sema::CheckARM64BuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
llvm::APSInt Result;
if (BuiltinID == ARM64::BI__builtin_arm_ldrex ||
BuiltinID == ARM64::BI__builtin_arm_strex) {
return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
}
if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
return true;
return false;
}
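// Usage sketch (not part of the patch): with MaxWidth raised to 128 for
// ARM64, the exclusive builtins accept 128-bit integers there, while 32-bit
// ARM keeps the 64-bit limit. Assumes an arm64 target with __int128 support;
// the function name is illustrative.
static int try_swap(__int128 *p, __int128 desired) {
  __int128 old = __builtin_arm_ldrex(p);  // exclusive load of the full pair
  (void)old;                              // value unused in this sketch
  return __builtin_arm_strex(desired, p); // 0 on success, 1 if monitor lost
}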
bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
unsigned i = 0, l = 0, u = 0;
switch (BuiltinID) {
View File
@ -4720,14 +4720,20 @@ static void HandleExtVectorTypeAttr(QualType &CurType,
}
static bool isPermittedNeonBaseType(QualType &Ty,
VectorType::VectorKind VecKind,
bool IsAArch64) {
VectorType::VectorKind VecKind, Sema &S) {
const BuiltinType *BTy = Ty->getAs<BuiltinType>();
if (!BTy)
return false;
llvm::Triple Triple = S.Context.getTargetInfo().getTriple();
// Signed poly is mathematically wrong, but has been baked into some ABIs by
// now.
bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 ||
Triple.getArch() == llvm::Triple::aarch64_be ||
Triple.getArch() == llvm::Triple::arm64;
if (VecKind == VectorType::NeonPolyVector) {
if (IsAArch64) {
if (IsPolyUnsigned) {
// AArch64 polynomial vectors are unsigned and support poly64.
return BTy->getKind() == BuiltinType::UChar ||
BTy->getKind() == BuiltinType::UShort ||
@ -4742,7 +4748,11 @@ static bool isPermittedNeonBaseType(QualType &Ty,
// Non-polynomial vector types: the usual suspects are allowed, as well as
// float64_t on AArch64.
if (IsAArch64 && BTy->getKind() == BuiltinType::Double)
bool Is64Bit = Triple.getArch() == llvm::Triple::aarch64 ||
Triple.getArch() == llvm::Triple::aarch64_be ||
Triple.getArch() == llvm::Triple::arm64;
if (Is64Bit && BTy->getKind() == BuiltinType::Double)
return true;
return BTy->getKind() == BuiltinType::SChar ||
@ -4794,11 +4804,7 @@ static void HandleNeonVectorTypeAttr(QualType& CurType,
return;
}
// Only certain element types are supported for Neon vectors.
llvm::Triple::ArchType Arch =
S.Context.getTargetInfo().getTriple().getArch();
if (!isPermittedNeonBaseType(CurType, VecKind,
(Arch == llvm::Triple::aarch64) ||
(Arch == llvm::Triple::aarch64_be))) {
if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
Attr.setInvalid();
return;
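// Minimal sketch (not part of the patch) of what the relaxed base-type check
// above accepts on aarch64/arm64 and rejects on 32-bit ARM; the typedef names
// are illustrative, and the attribute spellings are the internal ones used by
// arm_neon.h.
typedef __attribute__((neon_vector_type(2))) double sketch_float64x2_t;
typedef __attribute__((neon_polyvector_type(8))) unsigned char sketch_poly8x8_t;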
View File
@ -1,5 +1,5 @@
// RUN: %clang_cc1 %s -o - -emit-llvm | FileCheck %s
// XFAIL: aarch64
// XFAIL: aarch64, arm64
// PR1513
View File
@ -1,8 +1,10 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
@ -281,7 +283,7 @@ float32_t test_vfmas_laneq_f32(float32_t a, float32_t b, float32x4_t v) {
float64_t test_vfmsd_lane_f64(float64_t a, float64_t b, float64x1_t v) {
// CHECK-LABEL: test_vfmsd_lane_f64
return vfmsd_lane_f64(a, b, v, 0);
// CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: {{fmls d[0-9]+, d[0-9]+, v[0-9]+\.d\[0\]|fmsub d[0-9]+, d[0-9]+, d[0-9]+}}
}
float32_t test_vfmss_laneq_f32(float32_t a, float32_t b, float32x4_t v) {
@ -738,7 +740,7 @@ float32x2_t test_vmul_lane_f32(float32x2_t a, float32x2_t v) {
float64x1_t test_vmul_lane_f64(float64x1_t a, float64x1_t v) {
// CHECK-LABEL: test_vmul_lane_f64
return vmul_lane_f64(a, v, 0);
// CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+\.d\[0\]|d[0-9]+}}
}
@ -1574,109 +1576,109 @@ float64x2_t test_vmulxq_laneq_f64_0(float64x2_t a, float64x2_t v) {
int32x4_t test_vmull_high_n_s16(int16x8_t a, int16_t b) {
// CHECK-LABEL: test_vmull_high_n_s16
return vmull_high_n_s16(a, b);
// CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
int64x2_t test_vmull_high_n_s32(int32x4_t a, int32_t b) {
// CHECK-LABEL: test_vmull_high_n_s32
return vmull_high_n_s32(a, b);
// CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
uint32x4_t test_vmull_high_n_u16(uint16x8_t a, uint16_t b) {
// CHECK-LABEL: test_vmull_high_n_u16
return vmull_high_n_u16(a, b);
// CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
uint64x2_t test_vmull_high_n_u32(uint32x4_t a, uint32_t b) {
// CHECK-LABEL: test_vmull_high_n_u32
return vmull_high_n_u32(a, b);
// CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
int32x4_t test_vqdmull_high_n_s16(int16x8_t a, int16_t b) {
// CHECK-LABEL: test_vqdmull_high_n_s16
return vqdmull_high_n_s16(a, b);
// CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
int64x2_t test_vqdmull_high_n_s32(int32x4_t a, int32_t b) {
// CHECK-LABEL: test_vqdmull_high_n_s32
return vqdmull_high_n_s32(a, b);
// CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
int32x4_t test_vmlal_high_n_s16(int32x4_t a, int16x8_t b, int16_t c) {
// CHECK-LABEL: test_vmlal_high_n_s16
return vmlal_high_n_s16(a, b, c);
// CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
int64x2_t test_vmlal_high_n_s32(int64x2_t a, int32x4_t b, int32_t c) {
// CHECK-LABEL: test_vmlal_high_n_s32
return vmlal_high_n_s32(a, b, c);
// CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
uint32x4_t test_vmlal_high_n_u16(uint32x4_t a, uint16x8_t b, uint16_t c) {
// CHECK-LABEL: test_vmlal_high_n_u16
return vmlal_high_n_u16(a, b, c);
// CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
uint64x2_t test_vmlal_high_n_u32(uint64x2_t a, uint32x4_t b, uint32_t c) {
// CHECK-LABEL: test_vmlal_high_n_u32
return vmlal_high_n_u32(a, b, c);
// CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
int32x4_t test_vqdmlal_high_n_s16(int32x4_t a, int16x8_t b, int16_t c) {
// CHECK-LABEL: test_vqdmlal_high_n_s16
return vqdmlal_high_n_s16(a, b, c);
// CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
int64x2_t test_vqdmlal_high_n_s32(int64x2_t a, int32x4_t b, int32_t c) {
// CHECK-LABEL: test_vqdmlal_high_n_s32
return vqdmlal_high_n_s32(a, b, c);
// CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
int32x4_t test_vmlsl_high_n_s16(int32x4_t a, int16x8_t b, int16_t c) {
// CHECK-LABEL: test_vmlsl_high_n_s16
return vmlsl_high_n_s16(a, b, c);
// CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
int64x2_t test_vmlsl_high_n_s32(int64x2_t a, int32x4_t b, int32_t c) {
// CHECK-LABEL: test_vmlsl_high_n_s32
return vmlsl_high_n_s32(a, b, c);
// CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
uint32x4_t test_vmlsl_high_n_u16(uint32x4_t a, uint16x8_t b, uint16_t c) {
// CHECK-LABEL: test_vmlsl_high_n_u16
return vmlsl_high_n_u16(a, b, c);
// CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
uint64x2_t test_vmlsl_high_n_u32(uint64x2_t a, uint32x4_t b, uint32_t c) {
// CHECK-LABEL: test_vmlsl_high_n_u32
return vmlsl_high_n_u32(a, b, c);
// CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
int32x4_t test_vqdmlsl_high_n_s16(int32x4_t a, int16x8_t b, int16_t c) {
// CHECK-LABEL: test_vqdmlsl_high_n_s16
return vqdmlsl_high_n_s16(a, b, c);
// CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
// CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+\.h\[0\]|v[0-9]+\.8h}}
}
int64x2_t test_vqdmlsl_high_n_s32(int64x2_t a, int32x4_t b, int32_t c) {
// CHECK-LABEL: test_vqdmlsl_high_n_s32
return vqdmlsl_high_n_s32(a, b, c);
// CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
// CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+\.s\[0\]|v[0-9]+\.4s}}
}
float32x2_t test_vmul_n_f32(float32x2_t a, float32_t b) {
View File
@ -1,6 +1,8 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
View File
@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
View File
@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
@ -9,19 +12,19 @@
int8x8_t test_vext_s8(int8x8_t a, int8x8_t b) {
// CHECK-LABEL: test_vext_s8
return vext_s8(a, b, 2);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?2}}
}
int16x4_t test_vext_s16(int16x4_t a, int16x4_t b) {
// CHECK-LABEL: test_vext_s16
return vext_s16(a, b, 3);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?6}}
}
int32x2_t test_vext_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vext_s32
return vext_s32(a, b, 1);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?4}}
}
int64x1_t test_vext_s64(int64x1_t a, int64x1_t b) {
@ -32,43 +35,43 @@ int64x1_t test_vext_s64(int64x1_t a, int64x1_t b) {
int8x16_t test_vextq_s8(int8x16_t a, int8x16_t b) {
// CHECK-LABEL: test_vextq_s8
return vextq_s8(a, b, 2);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?2}}
}
int16x8_t test_vextq_s16(int16x8_t a, int16x8_t b) {
// CHECK-LABEL: test_vextq_s16
return vextq_s16(a, b, 3);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?6}}
}
int32x4_t test_vextq_s32(int32x4_t a, int32x4_t b) {
// CHECK-LABEL: test_vextq_s32
return vextq_s32(a, b, 1);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?4}}
}
int64x2_t test_vextq_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vextq_s64
return vextq_s64(a, b, 1);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?8}}
}
uint8x8_t test_vext_u8(uint8x8_t a, uint8x8_t b) {
// CHECK-LABEL: test_vext_u8
return vext_u8(a, b, 2);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?2}}
}
uint16x4_t test_vext_u16(uint16x4_t a, uint16x4_t b) {
// CHECK-LABEL: test_vext_u16
return vext_u16(a, b, 3);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?6}}
}
uint32x2_t test_vext_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vext_u32
return vext_u32(a, b, 1);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?4}}
}
uint64x1_t test_vext_u64(uint64x1_t a, uint64x1_t b) {
@ -79,31 +82,31 @@ uint64x1_t test_vext_u64(uint64x1_t a, uint64x1_t b) {
uint8x16_t test_vextq_u8(uint8x16_t a, uint8x16_t b) {
// CHECK-LABEL: test_vextq_u8
return vextq_u8(a, b, 2);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?2}}
}
uint16x8_t test_vextq_u16(uint16x8_t a, uint16x8_t b) {
// CHECK-LABEL: test_vextq_u16
return vextq_u16(a, b, 3);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?6}}
}
uint32x4_t test_vextq_u32(uint32x4_t a, uint32x4_t b) {
// CHECK-LABEL: test_vextq_u32
return vextq_u32(a, b, 1);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?4}}
}
uint64x2_t test_vextq_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vextq_u64
return vextq_u64(a, b, 1);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?8}}
}
float32x2_t test_vext_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vext_f32
return vext_f32(a, b, 1);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?4}}
}
float64x1_t test_vext_f64(float64x1_t a, float64x1_t b) {
@ -114,35 +117,35 @@ float64x1_t test_vext_f64(float64x1_t a, float64x1_t b) {
float32x4_t test_vextq_f32(float32x4_t a, float32x4_t b) {
// CHECK-LABEL: test_vextq_f32
return vextq_f32(a, b, 1);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?4}}
}
float64x2_t test_vextq_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vextq_f64
return vextq_f64(a, b, 1);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?8}}
}
poly8x8_t test_vext_p8(poly8x8_t a, poly8x8_t b) {
// CHECK-LABEL: test_vext_p8
return vext_p8(a, b, 2);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?2}}
}
poly16x4_t test_vext_p16(poly16x4_t a, poly16x4_t b) {
// CHECK-LABEL: test_vext_p16
return vext_p16(a, b, 3);
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
// CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?6}}
}
poly8x16_t test_vextq_p8(poly8x16_t a, poly8x16_t b) {
// CHECK-LABEL: test_vextq_p8
return vextq_p8(a, b, 2);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?2}}
}
poly16x8_t test_vextq_p16(poly16x8_t a, poly16x8_t b) {
// CHECK-LABEL: test_vextq_p16
return vextq_p16(a, b, 3);
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
// CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?6}}
}
View File
@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
@ -14,120 +17,120 @@ float32_t test_vcvtxd_f32_f64(float64_t a) {
int32_t test_vcvtas_s32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtas_s32_f32
// CHECK: fcvtas {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtas {{[ws][0-9]+}}, {{s[0-9]+}}
return (int32_t)vcvtas_s32_f32(a);
}
int64_t test_test_vcvtad_s64_f64(float64_t a) {
// CHECK-LABEL: test_test_vcvtad_s64_f64
// CHECK: fcvtas {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtas {{[dx][0-9]+}}, {{d[0-9]+}}
return (int64_t)vcvtad_s64_f64(a);
}
uint32_t test_vcvtas_u32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtas_u32_f32
// CHECK: fcvtau {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtau {{[ws][0-9]+}}, {{s[0-9]+}}
return (uint32_t)vcvtas_u32_f32(a);
}
uint64_t test_vcvtad_u64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtad_u64_f64
// CHECK: fcvtau {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtau {{[xd][0-9]+}}, {{d[0-9]+}}
return (uint64_t)vcvtad_u64_f64(a);
}
int32_t test_vcvtms_s32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtms_s32_f32
// CHECK: fcvtms {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtms {{[sw][0-9]+}}, {{s[0-9]+}}
return (int32_t)vcvtms_s32_f32(a);
}
int64_t test_vcvtmd_s64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtmd_s64_f64
// CHECK: fcvtms {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtms {{[dx][0-9]+}}, {{d[0-9]+}}
return (int64_t)vcvtmd_s64_f64(a);
}
uint32_t test_vcvtms_u32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtms_u32_f32
// CHECK: fcvtmu {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtmu {{[ws][0-9]+}}, {{s[0-9]+}}
return (uint32_t)vcvtms_u32_f32(a);
}
uint64_t test_vcvtmd_u64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtmd_u64_f64
// CHECK: fcvtmu {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtmu {{[xd][0-9]+}}, {{d[0-9]+}}
return (uint64_t)vcvtmd_u64_f64(a);
}
int32_t test_vcvtns_s32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtns_s32_f32
// CHECK: fcvtns {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtns {{[sw][0-9]+}}, {{s[0-9]+}}
return (int32_t)vcvtns_s32_f32(a);
}
int64_t test_vcvtnd_s64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtnd_s64_f64
// CHECK: fcvtns {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtns {{[dx][0-9]+}}, {{d[0-9]+}}
return (int64_t)vcvtnd_s64_f64(a);
}
uint32_t test_vcvtns_u32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtns_u32_f32
// CHECK: fcvtnu {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtnu {{[sw][0-9]+}}, {{s[0-9]+}}
return (uint32_t)vcvtns_u32_f32(a);
}
uint64_t test_vcvtnd_u64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtnd_u64_f64
// CHECK: fcvtnu {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtnu {{[dx][0-9]+}}, {{d[0-9]+}}
return (uint64_t)vcvtnd_u64_f64(a);
}
int32_t test_vcvtps_s32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtps_s32_f32
// CHECK: fcvtps {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtps {{[sw][0-9]+}}, {{s[0-9]+}}
return (int32_t)vcvtps_s32_f32(a);
}
int64_t test_vcvtpd_s64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtpd_s64_f64
// CHECK: fcvtps {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtps {{[dx][0-9]+}}, {{d[0-9]+}}
return (int64_t)vcvtpd_s64_f64(a);
}
uint32_t test_vcvtps_u32_f32(float32_t a) {
// CHECK-LABEL: test_vcvtps_u32_f32
// CHECK: fcvtpu {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtpu {{[sw][0-9]+}}, {{s[0-9]+}}
return (uint32_t)vcvtps_u32_f32(a);
}
uint64_t test_vcvtpd_u64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtpd_u64_f64
// CHECK: fcvtpu {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtpu {{[dx][0-9]+}}, {{d[0-9]+}}
return (uint64_t)vcvtpd_u64_f64(a);
}
int32_t test_vcvts_s32_f32(float32_t a) {
// CHECK-LABEL: test_vcvts_s32_f32
// CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtzs {{[sw][0-9]+}}, {{s[0-9]+}}
return (int32_t)vcvts_s32_f32(a);
}
int64_t test_vcvtd_s64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtd_s64_f64
// CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtzs {{[dx][0-9]+}}, {{d[0-9]+}}
return (int64_t)vcvtd_s64_f64(a);
}
uint32_t test_vcvts_u32_f32(float32_t a) {
// CHECK-LABEL: test_vcvts_u32_f32
// CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}
// CHECK: fcvtzu {{[sw][0-9]+}}, {{s[0-9]+}}
return (uint32_t)vcvts_u32_f32(a);
}
uint64_t test_vcvtd_u64_f64(float64_t a) {
// CHECK-LABEL: test_vcvtd_u64_f64
// CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}
// CHECK: fcvtzu {{[dx][0-9]+}}, {{d[0-9]+}}
return (uint64_t)vcvtd_u64_f64(a);
}
View File
@ -1,8 +1,10 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck -check-prefix=CHECK-FMA %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
@ -192,11 +194,11 @@ float32x4_t test_vmlsq_laneq_f32(float32x4_t a, float32x4_t b, float32x4_t v) {
float64x2_t test_vfmaq_n_f64(float64x2_t a, float64x2_t b, float64_t c) {
// CHECK-LABEL: test_vfmaq_n_f64:
return vfmaq_n_f64(a, b, c);
// CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
// CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+\.2d|v[0-9]+\.d\[0\]}}
}
float64x2_t test_vfmsq_n_f64(float64x2_t a, float64x2_t b, float64_t c) {
// CHECK-LABEL: test_vfmsq_n_f64:
return vfmsq_n_f64(a, b, c);
// CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
// CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+\.2d|v[0-9]+\.d\[0\]}}
}
View File
@ -1,109 +1,112 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
#include <arm_neon.h>
// CHECK-LABEL: test_vceqz_s8
// CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
// CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
uint8x8_t test_vceqz_s8(int8x8_t a) {
return vceqz_s8(a);
}
// CHECK-LABEL: test_vceqz_s16
// CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
// CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
uint16x4_t test_vceqz_s16(int16x4_t a) {
return vceqz_s16(a);
}
// CHECK-LABEL: test_vceqz_s32
// CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
// CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
uint32x2_t test_vceqz_s32(int32x2_t a) {
return vceqz_s32(a);
}
// CHECK-LABEL: test_vceqz_s64
// CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, #0x0
// CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, #{{0x0|0}}
uint64x1_t test_vceqz_s64(int64x1_t a) {
return vceqz_s64(a);
}
// CHECK-LABEL: test_vceqz_u64
// CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, #0x0
// CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, #{{0x0|0}}
uint64x1_t test_vceqz_u64(uint64x1_t a) {
return vceqz_u64(a);
}
// CHECK-LABEL: test_vceqz_p64
// CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, #0x0
// CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, #{{0x0|0}}
uint64x1_t test_vceqz_p64(poly64x1_t a) {
return vceqz_p64(a);
}
// CHECK-LABEL: test_vceqzq_s8
// CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
// CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
uint8x16_t test_vceqzq_s8(int8x16_t a) {
return vceqzq_s8(a);
}
// CHECK-LABEL: test_vceqzq_s16
// CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
// CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
uint16x8_t test_vceqzq_s16(int16x8_t a) {
return vceqzq_s16(a);
}
// CHECK-LABEL: test_vceqzq_s32
// CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
// CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
uint32x4_t test_vceqzq_s32(int32x4_t a) {
return vceqzq_s32(a);
}
// CHECK-LABEL: test_vceqzq_s64
// CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
// CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
uint64x2_t test_vceqzq_s64(int64x2_t a) {
return vceqzq_s64(a);
}
// CHECK-LABEL: test_vceqz_u8
// CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
// CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
uint8x8_t test_vceqz_u8(uint8x8_t a) {
return vceqz_u8(a);
}
// CHECK-LABEL: test_vceqz_u16
// CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
// CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
uint16x4_t test_vceqz_u16(uint16x4_t a) {
return vceqz_u16(a);
}
// CHECK-LABEL: test_vceqz_u32
// CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
// CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
uint32x2_t test_vceqz_u32(uint32x2_t a) {
return vceqz_u32(a);
}
// CHECK-LABEL: test_vceqzq_u8
// CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
// CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
uint8x16_t test_vceqzq_u8(uint8x16_t a) {
return vceqzq_u8(a);
}
// CHECK-LABEL: test_vceqzq_u16
// CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
// CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
uint16x8_t test_vceqzq_u16(uint16x8_t a) {
return vceqzq_u16(a);
}
// CHECK-LABEL: test_vceqzq_u32
// CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
// CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
uint32x4_t test_vceqzq_u32(uint32x4_t a) {
return vceqzq_u32(a);
}
// CHECK-LABEL: test_vceqzq_u64
// CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
// CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
uint64x2_t test_vceqzq_u64(uint64x2_t a) {
return vceqzq_u64(a);
}
@ -127,25 +130,25 @@ uint32x4_t test_vceqzq_f32(float32x4_t a) {
}
// CHECK-LABEL: test_vceqz_p8
// CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
// CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
uint8x8_t test_vceqz_p8(poly8x8_t a) {
return vceqz_p8(a);
}
// CHECK-LABEL: test_vceqzq_p8
// CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
// CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
uint8x16_t test_vceqzq_p8(poly8x16_t a) {
return vceqzq_p8(a);
}
// CHECK-LABEL: test_vceqz_p16
// CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
// CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
uint16x4_t test_vceqz_p16(poly16x4_t a) {
return vceqz_p16(a);
}
// CHECK-LABEL: test_vceqzq_p16
// CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
// CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
uint16x8_t test_vceqzq_p16(poly16x8_t a) {
return vceqzq_p16(a);
}
@ -163,49 +166,49 @@ uint64x2_t test_vceqzq_p64(poly64x2_t a) {
}
// CHECK-LABEL: test_vcgez_s8
// CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
// CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
uint8x8_t test_vcgez_s8(int8x8_t a) {
return vcgez_s8(a);
}
// CHECK-LABEL: test_vcgez_s16
// CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
// CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
uint16x4_t test_vcgez_s16(int16x4_t a) {
return vcgez_s16(a);
}
// CHECK-LABEL: test_vcgez_s32
// CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
// CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
uint32x2_t test_vcgez_s32(int32x2_t a) {
return vcgez_s32(a);
}
// CHECK-LABEL: test_vcgez_s64
// CHECK: cmge {{d[0-9]+}}, {{d[0-9]+}}, #0x0
// CHECK: cmge {{d[0-9]+}}, {{d[0-9]+}}, #{{0x0|0}}
uint64x1_t test_vcgez_s64(int64x1_t a) {
return vcgez_s64(a);
}
// CHECK-LABEL: test_vcgezq_s8
// CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
// CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
uint8x16_t test_vcgezq_s8(int8x16_t a) {
return vcgezq_s8(a);
}
// CHECK-LABEL: test_vcgezq_s16
// CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
// CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
uint16x8_t test_vcgezq_s16(int16x8_t a) {
return vcgezq_s16(a);
}
// CHECK-LABEL: test_vcgezq_s32
// CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
// CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
uint32x4_t test_vcgezq_s32(int32x4_t a) {
return vcgezq_s32(a);
}
// CHECK-LABEL: test_vcgezq_s64
// CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
// CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
uint64x2_t test_vcgezq_s64(int64x2_t a) {
return vcgezq_s64(a);
}
@ -235,49 +238,49 @@ uint64x2_t test_vcgezq_f64(float64x2_t a) {
}
// CHECK-LABEL: test_vclez_s8
// CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
// CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
uint8x8_t test_vclez_s8(int8x8_t a) {
return vclez_s8(a);
}
// CHECK-LABEL: test_vclez_s16
// CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
// CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
uint16x4_t test_vclez_s16(int16x4_t a) {
return vclez_s16(a);
}
// CHECK-LABEL: test_vclez_s32
// CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
// CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
uint32x2_t test_vclez_s32(int32x2_t a) {
return vclez_s32(a);
}
// CHECK-LABEL: test_vclez_s64
// CHECK: cmle {{d[0-9]+}}, {{d[0-9]+}}, #0x0
// CHECK: cmle {{d[0-9]+}}, {{d[0-9]+}}, #{{0x0|0}}
uint64x1_t test_vclez_s64(int64x1_t a) {
return vclez_s64(a);
}
// CHECK-LABEL: test_vclezq_s8
// CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
// CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
uint8x16_t test_vclezq_s8(int8x16_t a) {
return vclezq_s8(a);
}
// CHECK-LABEL: test_vclezq_s16
// CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
// CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
uint16x8_t test_vclezq_s16(int16x8_t a) {
return vclezq_s16(a);
}
// CHECK-LABEL: test_vclezq_s32
// CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
// CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
uint32x4_t test_vclezq_s32(int32x4_t a) {
return vclezq_s32(a);
}
// CHECK-LABEL: test_vclezq_s64
// CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
// CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
uint64x2_t test_vclezq_s64(int64x2_t a) {
return vclezq_s64(a);
}
@ -307,49 +310,49 @@ uint64x2_t test_vclezq_f64(float64x2_t a) {
}
// CHECK-LABEL: test_vcgtz_s8
// CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
// CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
uint8x8_t test_vcgtz_s8(int8x8_t a) {
return vcgtz_s8(a);
}
// CHECK-LABEL: test_vcgtz_s16
// CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
// CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
uint16x4_t test_vcgtz_s16(int16x4_t a) {
return vcgtz_s16(a);
}
// CHECK-LABEL: test_vcgtz_s32
// CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
// CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
uint32x2_t test_vcgtz_s32(int32x2_t a) {
return vcgtz_s32(a);
}
// CHECK-LABEL: test_vcgtz_s64
// CHECK: cmgt {{d[0-9]+}}, {{d[0-9]+}}, #0x0
// CHECK: cmgt {{d[0-9]+}}, {{d[0-9]+}}, #{{0x0|0}}
uint64x1_t test_vcgtz_s64(int64x1_t a) {
return vcgtz_s64(a);
}
// CHECK-LABEL: test_vcgtzq_s8
// CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
// CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
uint8x16_t test_vcgtzq_s8(int8x16_t a) {
return vcgtzq_s8(a);
}
// CHECK-LABEL: test_vcgtzq_s16
// CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
// CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
uint16x8_t test_vcgtzq_s16(int16x8_t a) {
return vcgtzq_s16(a);
}
// CHECK-LABEL: test_vcgtzq_s32
// CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
// CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
uint32x4_t test_vcgtzq_s32(int32x4_t a) {
return vcgtzq_s32(a);
}
// CHECK-LABEL: test_vcgtzq_s64
// CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
// CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
uint64x2_t test_vcgtzq_s64(int64x2_t a) {
return vcgtzq_s64(a);
}
View File
@ -1,9 +1,11 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
#include <arm_neon.h>
int8x8_t test_vuzp1_s8(int8x8_t a, int8x8_t b) {
@ -33,7 +35,7 @@ int16x8_t test_vuzp1q_s16(int16x8_t a, int16x8_t b) {
int32x2_t test_vuzp1_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vuzp1_s32
return vuzp1_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
int32x4_t test_vuzp1q_s32(int32x4_t a, int32x4_t b) {
@ -45,7 +47,7 @@ int32x4_t test_vuzp1q_s32(int32x4_t a, int32x4_t b) {
int64x2_t test_vuzp1q_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vuzp1q_s64
return vuzp1q_s64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
uint8x8_t test_vuzp1_u8(uint8x8_t a, uint8x8_t b) {
@ -75,7 +77,7 @@ uint16x8_t test_vuzp1q_u16(uint16x8_t a, uint16x8_t b) {
uint32x2_t test_vuzp1_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vuzp1_u32
return vuzp1_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
uint32x4_t test_vuzp1q_u32(uint32x4_t a, uint32x4_t b) {
@ -87,13 +89,13 @@ uint32x4_t test_vuzp1q_u32(uint32x4_t a, uint32x4_t b) {
uint64x2_t test_vuzp1q_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vuzp1q_u64
return vuzp1q_u64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
float32x2_t test_vuzp1_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vuzp1_f32
return vuzp1_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
float32x4_t test_vuzp1q_f32(float32x4_t a, float32x4_t b) {
@ -105,7 +107,7 @@ float32x4_t test_vuzp1q_f32(float32x4_t a, float32x4_t b) {
float64x2_t test_vuzp1q_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vuzp1q_f64
return vuzp1q_f64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
poly8x8_t test_vuzp1_p8(poly8x8_t a, poly8x8_t b) {
@ -159,7 +161,7 @@ int16x8_t test_vuzp2q_s16(int16x8_t a, int16x8_t b) {
int32x2_t test_vuzp2_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vuzp2_s32
return vuzp2_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
int32x4_t test_vuzp2q_s32(int32x4_t a, int32x4_t b) {
@ -171,7 +173,7 @@ int32x4_t test_vuzp2q_s32(int32x4_t a, int32x4_t b) {
int64x2_t test_vuzp2q_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vuzp2q_s64
return vuzp2q_s64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
uint8x8_t test_vuzp2_u8(uint8x8_t a, uint8x8_t b) {
@ -201,7 +203,7 @@ uint16x8_t test_vuzp2q_u16(uint16x8_t a, uint16x8_t b) {
uint32x2_t test_vuzp2_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vuzp2_u32
return vuzp2_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
uint32x4_t test_vuzp2q_u32(uint32x4_t a, uint32x4_t b) {
@ -213,13 +215,13 @@ uint32x4_t test_vuzp2q_u32(uint32x4_t a, uint32x4_t b) {
uint64x2_t test_vuzp2q_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vuzp2q_u64
return vuzp2q_u64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
float32x2_t test_vuzp2_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vuzp2_f32
return vuzp2_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
float32x4_t test_vuzp2q_f32(float32x4_t a, float32x4_t b) {
@ -231,7 +233,7 @@ float32x4_t test_vuzp2q_f32(float32x4_t a, float32x4_t b) {
float64x2_t test_vuzp2q_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vuzp2q_f64
return vuzp2q_f64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
poly8x8_t test_vuzp2_p8(poly8x8_t a, poly8x8_t b) {
@ -285,7 +287,7 @@ int16x8_t test_vzip1q_s16(int16x8_t a, int16x8_t b) {
int32x2_t test_vzip1_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vzip1_s32
return vzip1_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
int32x4_t test_vzip1q_s32(int32x4_t a, int32x4_t b) {
@ -297,7 +299,7 @@ int32x4_t test_vzip1q_s32(int32x4_t a, int32x4_t b) {
int64x2_t test_vzip1q_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vzip1q_s64
return vzip1q_s64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
uint8x8_t test_vzip1_u8(uint8x8_t a, uint8x8_t b) {
@ -327,7 +329,7 @@ uint16x8_t test_vzip1q_u16(uint16x8_t a, uint16x8_t b) {
uint32x2_t test_vzip1_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vzip1_u32
return vzip1_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
uint32x4_t test_vzip1q_u32(uint32x4_t a, uint32x4_t b) {
@ -339,13 +341,13 @@ uint32x4_t test_vzip1q_u32(uint32x4_t a, uint32x4_t b) {
uint64x2_t test_vzip1q_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vzip1q_u64
return vzip1q_u64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
float32x2_t test_vzip1_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vzip1_f32
return vzip1_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
float32x4_t test_vzip1q_f32(float32x4_t a, float32x4_t b) {
@ -357,7 +359,7 @@ float32x4_t test_vzip1q_f32(float32x4_t a, float32x4_t b) {
float64x2_t test_vzip1q_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vzip1q_f64
return vzip1q_f64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
poly8x8_t test_vzip1_p8(poly8x8_t a, poly8x8_t b) {
@ -411,7 +413,7 @@ int16x8_t test_vzip2q_s16(int16x8_t a, int16x8_t b) {
int32x2_t test_vzip2_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vzip2_s32
return vzip2_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
int32x4_t test_vzip2q_s32(int32x4_t a, int32x4_t b) {
@ -423,7 +425,7 @@ int32x4_t test_vzip2q_s32(int32x4_t a, int32x4_t b) {
int64x2_t test_vzip2q_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vzip2q_s64
return vzip2q_s64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
uint8x8_t test_vzip2_u8(uint8x8_t a, uint8x8_t b) {
@ -453,7 +455,7 @@ uint16x8_t test_vzip2q_u16(uint16x8_t a, uint16x8_t b) {
uint32x2_t test_vzip2_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vzip2_u32
return vzip2_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
uint32x4_t test_vzip2q_u32(uint32x4_t a, uint32x4_t b) {
@ -465,13 +467,13 @@ uint32x4_t test_vzip2q_u32(uint32x4_t a, uint32x4_t b) {
uint64x2_t test_vzip2q_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vzip2q_u64
return vzip2q_u64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
float32x2_t test_vzip2_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vzip2_f32
return vzip2_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
float32x4_t test_vzip2q_f32(float32x4_t a, float32x4_t b) {
@ -483,7 +485,7 @@ float32x4_t test_vzip2q_f32(float32x4_t a, float32x4_t b) {
float64x2_t test_vzip2q_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vzip2q_f64
return vzip2q_f64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
poly8x8_t test_vzip2_p8(poly8x8_t a, poly8x8_t b) {
@ -537,7 +539,7 @@ int16x8_t test_vtrn1q_s16(int16x8_t a, int16x8_t b) {
int32x2_t test_vtrn1_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vtrn1_s32
return vtrn1_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
int32x4_t test_vtrn1q_s32(int32x4_t a, int32x4_t b) {
@ -549,7 +551,7 @@ int32x4_t test_vtrn1q_s32(int32x4_t a, int32x4_t b) {
int64x2_t test_vtrn1q_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vtrn1q_s64
return vtrn1q_s64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
uint8x8_t test_vtrn1_u8(uint8x8_t a, uint8x8_t b) {
@ -579,7 +581,7 @@ uint16x8_t test_vtrn1q_u16(uint16x8_t a, uint16x8_t b) {
uint32x2_t test_vtrn1_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vtrn1_u32
return vtrn1_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
uint32x4_t test_vtrn1q_u32(uint32x4_t a, uint32x4_t b) {
@ -591,13 +593,13 @@ uint32x4_t test_vtrn1q_u32(uint32x4_t a, uint32x4_t b) {
uint64x2_t test_vtrn1q_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vtrn1q_u64
return vtrn1q_u64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
float32x2_t test_vtrn1_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vtrn1_f32
return vtrn1_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v0.2s, v0.2s, v1.2s}}
}
float32x4_t test_vtrn1q_f32(float32x4_t a, float32x4_t b) {
@ -609,7 +611,7 @@ float32x4_t test_vtrn1q_f32(float32x4_t a, float32x4_t b) {
float64x2_t test_vtrn1q_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vtrn1q_f64
return vtrn1q_f64(a, b);
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: {{ins v[0-9]+.d\[1\], v[0-9]+.d\[0\]|zip1 v0.2d, v0.2d, v1.2d}}
}
poly8x8_t test_vtrn1_p8(poly8x8_t a, poly8x8_t b) {
@ -663,7 +665,7 @@ int16x8_t test_vtrn2q_s16(int16x8_t a, int16x8_t b) {
int32x2_t test_vtrn2_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vtrn2_s32
return vtrn2_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
int32x4_t test_vtrn2q_s32(int32x4_t a, int32x4_t b) {
@ -675,7 +677,7 @@ int32x4_t test_vtrn2q_s32(int32x4_t a, int32x4_t b) {
int64x2_t test_vtrn2q_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vtrn2q_s64
return vtrn2q_s64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
uint8x8_t test_vtrn2_u8(uint8x8_t a, uint8x8_t b) {
@ -705,7 +707,7 @@ uint16x8_t test_vtrn2q_u16(uint16x8_t a, uint16x8_t b) {
uint32x2_t test_vtrn2_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vtrn2_u32
return vtrn2_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
uint32x4_t test_vtrn2q_u32(uint32x4_t a, uint32x4_t b) {
@ -717,13 +719,13 @@ uint32x4_t test_vtrn2q_u32(uint32x4_t a, uint32x4_t b) {
uint64x2_t test_vtrn2q_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vtrn2q_u64
return vtrn2q_u64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
float32x2_t test_vtrn2_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vtrn2_f32
return vtrn2_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v0.2s, v0.2s, v1.2s}}
}
float32x4_t test_vtrn2q_f32(float32x4_t a, float32x4_t b) {
@ -735,7 +737,7 @@ float32x4_t test_vtrn2q_f32(float32x4_t a, float32x4_t b) {
float64x2_t test_vtrn2q_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vtrn2q_f64
return vtrn2q_f64(a, b);
// CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
// CHECK: {{ins v[0-9]+.d\[0\], v[0-9]+.d\[1\]|zip2 v0.2d, v0.2d, v1.2d}}
}
poly8x8_t test_vtrn2_p8(poly8x8_t a, poly8x8_t b) {
@ -778,8 +780,8 @@ int16x4x2_t test_vuzp_s16(int16x4_t a, int16x4_t b) {
int32x2x2_t test_vuzp_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vuzp_s32
return vuzp_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
uint8x8x2_t test_vuzp_u8(uint8x8_t a, uint8x8_t b) {
// CHECK-LABEL: test_vuzp_u8
@ -796,14 +798,14 @@ uint16x4x2_t test_vuzp_u16(uint16x4_t a, uint16x4_t b) {
uint32x2x2_t test_vuzp_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vuzp_u32
return vuzp_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
float32x2x2_t test_vuzp_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vuzp_f32
return vuzp_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
poly8x8x2_t test_vuzp_p8(poly8x8_t a, poly8x8_t b) {
// CHECK-LABEL: test_vuzp_p8
@ -888,8 +890,8 @@ int16x4x2_t test_vzip_s16(int16x4_t a, int16x4_t b) {
int32x2x2_t test_vzip_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vzip_s32
return vzip_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
uint8x8x2_t test_vzip_u8(uint8x8_t a, uint8x8_t b) {
// CHECK-LABEL: test_vzip_u8
@ -906,14 +908,14 @@ uint16x4x2_t test_vzip_u16(uint16x4_t a, uint16x4_t b) {
uint32x2x2_t test_vzip_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vzip_u32
return vzip_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
float32x2x2_t test_vzip_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vzip_f32
return vzip_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
poly8x8x2_t test_vzip_p8(poly8x8_t a, poly8x8_t b) {
// CHECK-LABEL: test_vzip_p8
@ -998,8 +1000,8 @@ int16x4x2_t test_vtrn_s16(int16x4_t a, int16x4_t b) {
int32x2x2_t test_vtrn_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vtrn_s32
return vtrn_s32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
uint8x8x2_t test_vtrn_u8(uint8x8_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtrn_u8
@ -1016,14 +1018,14 @@ uint16x4x2_t test_vtrn_u16(uint16x4_t a, uint16x4_t b) {
uint32x2x2_t test_vtrn_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vtrn_u32
return vtrn_u32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
float32x2x2_t test_vtrn_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vtrn_f32
return vtrn_f32(a, b);
// CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
// CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v2.2s, v0.2s, v1.2s}}
// CHECK: {{ins v[0-9]+.s\[0\], v[0-9]+.s\[1\]|zip2 v1.2s, v0.2s, v1.2s}}
}
poly8x8x2_t test_vtrn_p8(poly8x8_t a, poly8x8_t b) {
// CHECK-LABEL: test_vtrn_p8

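All of the pattern rewrites in this file follow one idiom so that a single test can pass against both code generators: FileCheck's {{...}} blocks are regular expressions, so a | alternation accepts either the AArch64 backend's element-insert (ins) lowering or the ARM64 backend's zip1/zip2 sequence. A minimal sketch of the idiom, assuming an arm64 run line; the function below is hypothetical and not part of the commit:

// RUN: %clang_cc1 -triple arm64-none-linux-gnu -S -O3 -o - %s | FileCheck %s
#include <arm_neon.h>

int32x2_t demo_trn1(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: demo_trn1
  return vtrn1_s32(a, b);
// Either backend's choice is accepted: an element insert or a zip.
// CHECK: {{ins v[0-9]+.s\[1\], v[0-9]+.s\[0\]|zip1 v[0-9]+.2s, v[0-9]+.2s, v[0-9]+.2s}}
}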
View File

@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
#include <arm_neon.h>
@ -40,14 +43,14 @@ float64_t test_vdupd_laneq_f64(float64x2_t a) {
// CHECK-LABEL: test_vdupb_lane_s8
int8_t test_vdupb_lane_s8(int8x8_t a) {
return vdupb_lane_s8(a, 7);
// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
// CHECK: {{umov|smov}} {{w[0-9]+}}, {{v[0-9]+}}.b[7]
}
// CHECK-LABEL: test_vduph_lane_s16
int16_t test_vduph_lane_s16(int16x4_t a) {
return vduph_lane_s16(a, 3);
// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[3]
// CHECK: {{umov|smov}} {{w[0-9]+}}, {{v[0-9]+}}.h[3]
}
@ -95,14 +98,14 @@ uint64_t test_vdupd_lane_u64(uint64x1_t a) {
// CHECK-LABEL: test_vdupb_laneq_s8
int8_t test_vdupb_laneq_s8(int8x16_t a) {
return vdupb_laneq_s8(a, 15);
// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[15]
// CHECK: {{umov|smov}} {{w[0-9]+}}, {{v[0-9]+}}.b[15]
}
// CHECK-LABEL: test_vduph_laneq_s16
int16_t test_vduph_laneq_s16(int16x8_t a) {
return vduph_laneq_s16(a, 7);
// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[7]
// CHECK: {{umov|smov}} {{w[0-9]+}}, {{v[0-9]+}}.h[7]
}

View File

@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
@ -16,7 +19,7 @@ float32_t test_vmuls_lane_f32(float32_t a, float32x2_t b) {
float64_t test_vmuld_lane_f64(float64_t a, float64x1_t b) {
// CHECK-LABEL: test_vmuld_lane_f64
return vmuld_lane_f64(a, b, 0);
// CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0\]|d[0-9]+}}
}
float32_t test_vmuls_laneq_f32(float32_t a, float32x4_t b) {
@ -34,7 +37,7 @@ float64_t test_vmuld_laneq_f64(float64_t a, float64x2_t b) {
float64x1_t test_vmul_n_f64(float64x1_t a, float64_t b) {
// CHECK-LABEL: test_vmul_n_f64
return vmul_n_f64(a, b);
// CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0\]|d[0-9]+}}
}
float32_t test_vmulxs_lane_f32(float32_t a, float32x2_t b) {
@ -52,7 +55,7 @@ float32_t test_vmulxs_laneq_f32(float32_t a, float32x4_t b) {
float64_t test_vmulxd_lane_f64(float64_t a, float64x1_t b) {
// CHECK-LABEL: test_vmulxd_lane_f64
return vmulxd_lane_f64(a, b, 0);
// CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0\]|d[0-9]+}}
}
float64_t test_vmulxd_laneq_f64(float64_t a, float64x2_t b) {
@ -64,7 +67,7 @@ float64_t test_vmulxd_laneq_f64(float64_t a, float64x2_t b) {
// CHECK-LABEL: test_vmulx_lane_f64
float64x1_t test_vmulx_lane_f64(float64x1_t a, float64x1_t b) {
return vmulx_lane_f64(a, b, 0);
// CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0\]|d[0-9]+}}
}
@ -90,7 +93,7 @@ float32_t test_vfmas_lane_f32(float32_t a, float32_t b, float32x2_t c) {
// CHECK-LABEL: test_vfmad_lane_f64
float64_t test_vfmad_lane_f64(float64_t a, float64_t b, float64x1_t c) {
return vfmad_lane_f64(a, b, c, 0);
// CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: {{fmla|fmadd}} {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0\]|d[0-9]+}}
}
// CHECK-LABEL: test_vfmad_laneq_f64
@ -108,13 +111,13 @@ float32_t test_vfmss_lane_f32(float32_t a, float32_t b, float32x2_t c) {
// CHECK-LABEL: test_vfma_lane_f64
float64x1_t test_vfma_lane_f64(float64x1_t a, float64x1_t b, float64x1_t v) {
return vfma_lane_f64(a, b, v, 0);
// CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: {{fmla|fmadd}} {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0\]|d[0-9]+}}
}
// CHECK-LABEL: test_vfms_lane_f64
float64x1_t test_vfms_lane_f64(float64x1_t a, float64x1_t b, float64x1_t v) {
return vfms_lane_f64(a, b, v, 0);
// CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
// CHECK: {{fmls|fmsub}} {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0\]|d[0-9]+}}
}
// CHECK-LABEL: test_vfma_laneq_f64
@ -132,7 +135,7 @@ float64x1_t test_vfms_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {
// CHECK-LABEL: test_vqdmullh_lane_s16
int32_t test_vqdmullh_lane_s16(int16_t a, int16x4_t b) {
return vqdmullh_lane_s16(a, b, 3);
// CHECK: sqdmull {{s[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[3]
// CHECK: sqdmull {{s[0-9]+|v[0-9]+.4s}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[3]
}
// CHECK-LABEL: test_vqdmulls_lane_s32
@ -144,7 +147,7 @@ int64_t test_vqdmulls_lane_s32(int32_t a, int32x2_t b) {
// CHECK-LABEL: test_vqdmullh_laneq_s16
int32_t test_vqdmullh_laneq_s16(int16_t a, int16x8_t b) {
return vqdmullh_laneq_s16(a, b, 7);
// CHECK: sqdmull {{s[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[7]
// CHECK: sqdmull {{s[0-9]+|v[0-9]+.4s}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[7]
}
// CHECK-LABEL: test_vqdmulls_laneq_s32
@ -156,7 +159,7 @@ int64_t test_vqdmulls_laneq_s32(int32_t a, int32x4_t b) {
// CHECK-LABEL: test_vqdmulhh_lane_s16
int16_t test_vqdmulhh_lane_s16(int16_t a, int16x4_t b) {
return vqdmulhh_lane_s16(a, b, 3);
// CHECK: sqdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[3]
// CHECK: sqdmulh {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[3]
}
// CHECK-LABEL: test_vqdmulhs_lane_s32
@ -169,7 +172,7 @@ int32_t test_vqdmulhs_lane_s32(int32_t a, int32x2_t b) {
// CHECK-LABEL: test_vqdmulhh_laneq_s16
int16_t test_vqdmulhh_laneq_s16(int16_t a, int16x8_t b) {
return vqdmulhh_laneq_s16(a, b, 7);
// CHECK: sqdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[7]
// CHECK: sqdmulh {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[7]
}
@ -182,7 +185,7 @@ int32_t test_vqdmulhs_laneq_s32(int32_t a, int32x4_t b) {
// CHECK-LABEL: test_vqrdmulhh_lane_s16
int16_t test_vqrdmulhh_lane_s16(int16_t a, int16x4_t b) {
return vqrdmulhh_lane_s16(a, b, 3);
// CHECK: sqrdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[3]
// CHECK: sqrdmulh {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[3]
}
// CHECK-LABEL: test_vqrdmulhs_lane_s32
@ -195,7 +198,7 @@ int32_t test_vqrdmulhs_lane_s32(int32_t a, int32x2_t b) {
// CHECK-LABEL: test_vqrdmulhh_laneq_s16
int16_t test_vqrdmulhh_laneq_s16(int16_t a, int16x8_t b) {
return vqrdmulhh_laneq_s16(a, b, 7);
// CHECK: sqrdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[7]
// CHECK: sqrdmulh {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[7]
}
@ -208,7 +211,7 @@ int32_t test_vqrdmulhs_laneq_s32(int32_t a, int32x4_t b) {
// CHECK-LABEL: test_vqdmlalh_lane_s16
int32_t test_vqdmlalh_lane_s16(int32_t a, int16_t b, int16x4_t c) {
return vqdmlalh_lane_s16(a, b, c, 3);
// CHECK: sqdmlal {{s[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[3]
// CHECK: sqdmlal {{s[0-9]+|v[0-9]+.4s}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[3]
}
// CHECK-LABEL: test_vqdmlals_lane_s32
@ -220,7 +223,7 @@ int64_t test_vqdmlals_lane_s32(int64_t a, int32_t b, int32x2_t c) {
// CHECK-LABEL: test_vqdmlalh_laneq_s16
int32_t test_vqdmlalh_laneq_s16(int32_t a, int16_t b, int16x8_t c) {
return vqdmlalh_laneq_s16(a, b, c, 7);
// CHECK: sqdmlal {{s[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[7]
// CHECK: sqdmlal {{s[0-9]+|v[0-9]+.4s}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[7]
}
// CHECK-LABEL: test_vqdmlals_laneq_s32
@ -232,7 +235,7 @@ int64_t test_vqdmlals_laneq_s32(int64_t a, int32_t b, int32x4_t c) {
// CHECK-LABEL: test_vqdmlslh_lane_s16
int32_t test_vqdmlslh_lane_s16(int32_t a, int16_t b, int16x4_t c) {
return vqdmlslh_lane_s16(a, b, c, 3);
// CHECK: sqdmlsl {{s[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[3]
// CHECK: sqdmlsl {{s[0-9]+|v[0-9]+.4s}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[3]
}
// CHECK-LABEL: test_vqdmlsls_lane_s32
@ -244,7 +247,7 @@ int64_t test_vqdmlsls_lane_s32(int64_t a, int32_t b, int32x2_t c) {
// CHECK-LABEL: test_vqdmlslh_laneq_s16
int32_t test_vqdmlslh_laneq_s16(int32_t a, int16_t b, int16x8_t c) {
return vqdmlslh_laneq_s16(a, b, c, 7);
// CHECK: sqdmlsl {{s[0-9]+}}, {{h[0-9]+}}, {{v[0-9]+}}.h[7]
// CHECK: sqdmlsl {{s[0-9]+|v[0-9]+.4s}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[7]
}
// CHECK-LABEL: test_vqdmlsls_laneq_s32
@ -262,11 +265,11 @@ float64x1_t test_vmulx_lane_f64_0() {
arg1 = vcreate_f64(UINT64_C(0x3fd6304bc43ab5c2));
arg2 = vcreate_f64(UINT64_C(0x3fee211e215aeef3));
result = vmulx_lane_f64(arg1, arg2, 0);
// CHECK: adrp x0
// CHECK: ldr d0, [x0,
// CHECK: adrp x0
// CHECK: ldr d1, [x0,
// CHECK: fmulx d0, d1, d0
// CHECK: adrp x[[ADDRLO:[0-9]+]]
// CHECK: ldr d0, [x[[ADDRLO]],
// CHECK: adrp x[[ADDRLO:[0-9]+]]
// CHECK: ldr d1, [x[[ADDRLO]],
// CHECK: fmulx d0, d1, d0
return result;
}
@ -281,10 +284,10 @@ float64x1_t test_vmulx_laneq_f64_2() {
arg2 = vcreate_f64(UINT64_C(0x3fee211e215aeef3));
arg3 = vcombine_f64(arg1, arg2);
result = vmulx_laneq_f64(arg1, arg3, 1);
// CHECK: adrp x0
// CHECK: ldr d0, [x0,
// CHECK: adrp x0
// CHECK: ldr d1, [x0,
// CHECK: fmulx d0, d1, d0
// CHECK: adrp x[[ADDRLO:[0-9]+]]
// CHECK: ldr d0, [x[[ADDRLO]],
// CHECK: adrp x[[ADDRLO:[0-9]+]]
// CHECK: ldr d1, [x[[ADDRLO]],
// CHECK: fmulx d0, d1, d0
return result;
}
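The adrp/ldr checks above also switch from a hard-coded x0 to a FileCheck pattern variable: [[ADDRLO:[0-9]+]] captures whichever register number the compiler chose, and the later [[ADDRLO]] requires the paired ldr to use the same register, so the test no longer depends on one particular allocation. A stand-alone sketch of the capture-and-reuse idiom; the function and constant are hypothetical, and it assumes the value is materialized from a literal pool as in the tests above:

double demo_literal_load(void) {
// The first line binds the register number; the second reuses it, so
// "adrp x3" followed by "ldr d0, [x3, ..." matches, but x3 then x4 would not.
// CHECK: adrp x[[BASE:[0-9]+]]
// CHECK: ldr d0, [x[[BASE]],
  return 0.69314718055994531;
}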

View File

@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -emit-llvm -O1 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -emit-llvm -O1 -o - %s | FileCheck %s
#include <arm_neon.h>

View File

@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
@ -9,81 +12,81 @@
int8x8_t test_vtbl1_s8(int8x8_t a, int8x8_t b) {
// CHECK-LABEL: test_vtbl1_s8
return vtbl1_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbl1_s8(int8x16_t a, int8x8_t b) {
// CHECK-LABEL: test_vqtbl1_s8
return vqtbl1_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vtbl2_s8(int8x8x2_t a, int8x8_t b) {
// CHECK-LABEL: test_vtbl2_s8
return vtbl2_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbl2_s8(int8x16x2_t a, int8x8_t b) {
// CHECK-LABEL: test_vqtbl2_s8
return vqtbl2_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vtbl3_s8(int8x8x3_t a, int8x8_t b) {
// CHECK-LABEL: test_vtbl3_s8
return vtbl3_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbl3_s8(int8x16x3_t a, int8x8_t b) {
// CHECK-LABEL: test_vqtbl3_s8
return vqtbl3_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vtbl4_s8(int8x8x4_t a, int8x8_t b) {
// CHECK-LABEL: test_vtbl4_s8
return vtbl4_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbl4_s8(int8x16x4_t a, int8x8_t b) {
// CHECK-LABEL: test_vqtbl4_s8
return vqtbl4_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x16_t test_vqtbl1q_s8(int8x16_t a, int8x16_t b) {
// CHECK-LABEL: test_vqtbl1q_s8
return vqtbl1q_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
int8x16_t test_vqtbl2q_s8(int8x16x2_t a, int8x16_t b) {
// CHECK-LABEL: test_vqtbl2q_s8
return vqtbl2q_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
int8x16_t test_vqtbl3q_s8(int8x16x3_t a, int8x16_t b) {
// CHECK-LABEL: test_vqtbl3q_s8
return vqtbl3q_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
int8x16_t test_vqtbl4q_s8(int8x16x4_t a, int8x16_t b) {
// CHECK-LABEL: test_vqtbl4q_s8
return vqtbl4q_s8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
int8x8_t test_vtbx1_s8(int8x8_t a, int8x8_t b, int8x8_t c) {
// CHECK-LABEL: test_vtbx1_s8
return vtbx1_s8(a, b, c);
// CHECK: movi {{v[0-9]+}}.8b, #0
// CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #0
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
// CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
// CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
}
@ -91,15 +94,15 @@ int8x8_t test_vtbx1_s8(int8x8_t a, int8x8_t b, int8x8_t c) {
int8x8_t test_vtbx2_s8(int8x8_t a, int8x8x2_t b, int8x8_t c) {
// CHECK-LABEL: test_vtbx2_s8
return vtbx2_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vtbx3_s8(int8x8_t a, int8x8x3_t b, int8x8_t c) {
// CHECK-LABEL: test_vtbx3_s8
return vtbx3_s8(a, b, c);
// CHECK: movi {{v[0-9]+}}.8b, #0
// CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #0
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
// CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
// CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
}
@ -107,135 +110,135 @@ int8x8_t test_vtbx3_s8(int8x8_t a, int8x8x3_t b, int8x8_t c) {
int8x8_t test_vtbx4_s8(int8x8_t a, int8x8x4_t b, int8x8_t c) {
// CHECK-LABEL: test_vtbx4_s8
return vtbx4_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbx1_s8(int8x8_t a, int8x16_t b, int8x8_t c) {
// CHECK-LABEL: test_vqtbx1_s8
return vqtbx1_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbx2_s8(int8x8_t a, int8x16x2_t b, int8x8_t c) {
// CHECK-LABEL: test_vqtbx2_s8
return vqtbx2_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbx3_s8(int8x8_t a, int8x16x3_t b, int8x8_t c) {
// CHECK-LABEL: test_vqtbx3_s8
return vqtbx3_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x8_t test_vqtbx4_s8(int8x8_t a, int8x16x4_t b, int8x8_t c) {
// CHECK-LABEL: test_vqtbx4_s8
return vqtbx4_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
int8x16_t test_vqtbx1q_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
// CHECK-LABEL: test_vqtbx1q_s8
return vqtbx1q_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
int8x16_t test_vqtbx2q_s8(int8x16_t a, int8x16x2_t b, int8x16_t c) {
// CHECK-LABEL: test_vqtbx2q_s8
return vqtbx2q_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
int8x16_t test_vqtbx3q_s8(int8x16_t a, int8x16x3_t b, int8x16_t c) {
// CHECK-LABEL: test_vqtbx3q_s8
return vqtbx3q_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
int8x16_t test_vqtbx4q_s8(int8x16_t a, int8x16x4_t b, int8x16_t c) {
// CHECK-LABEL: test_vqtbx4q_s8
return vqtbx4q_s8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x8_t test_vtbl1_u8(uint8x8_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl1_u8
return vtbl1_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbl1_u8(uint8x16_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl1_u8
return vqtbl1_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vtbl2_u8(uint8x8x2_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl2_u8
return vtbl2_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbl2_u8(uint8x16x2_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl2_u8
return vqtbl2_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vtbl3_u8(uint8x8x3_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl3_u8
return vtbl3_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbl3_u8(uint8x16x3_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl3_u8
return vqtbl3_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vtbl4_u8(uint8x8x4_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl4_u8
return vtbl4_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbl4_u8(uint8x16x4_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl4_u8
return vqtbl4_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x16_t test_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl1q_u8
return vqtbl1q_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x16_t test_vqtbl2q_u8(uint8x16x2_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl2q_u8
return vqtbl2q_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x16_t test_vqtbl3q_u8(uint8x16x3_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl3q_u8
return vqtbl3q_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x16_t test_vqtbl4q_u8(uint8x16x4_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl4q_u8
return vqtbl4q_u8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x8_t test_vtbx1_u8(uint8x8_t a, uint8x8_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx1_u8
return vtbx1_u8(a, b, c);
// CHECK: movi {{v[0-9]+}}.8b, #0
// CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #0
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
// CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
// CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
}
@ -243,15 +246,15 @@ uint8x8_t test_vtbx1_u8(uint8x8_t a, uint8x8_t b, uint8x8_t c) {
uint8x8_t test_vtbx2_u8(uint8x8_t a, uint8x8x2_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx2_u8
return vtbx2_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vtbx3_u8(uint8x8_t a, uint8x8x3_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx3_u8
return vtbx3_u8(a, b, c);
// CHECK: movi {{v[0-9]+}}.8b, #0
// CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #0
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
// CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
// CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
}
@ -259,135 +262,135 @@ uint8x8_t test_vtbx3_u8(uint8x8_t a, uint8x8x3_t b, uint8x8_t c) {
uint8x8_t test_vtbx4_u8(uint8x8_t a, uint8x8x4_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx4_u8
return vtbx4_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbx1_u8(uint8x8_t a, uint8x16_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx1_u8
return vqtbx1_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbx2_u8(uint8x8_t a, uint8x16x2_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx2_u8
return vqtbx2_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbx3_u8(uint8x8_t a, uint8x16x3_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx3_u8
return vqtbx3_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x8_t test_vqtbx4_u8(uint8x8_t a, uint8x16x4_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx4_u8
return vqtbx4_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
uint8x16_t test_vqtbx1q_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx1q_u8
return vqtbx1q_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x16_t test_vqtbx2q_u8(uint8x16_t a, uint8x16x2_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx2q_u8
return vqtbx2q_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x16_t test_vqtbx3q_u8(uint8x16_t a, uint8x16x3_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx3q_u8
return vqtbx3q_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
uint8x16_t test_vqtbx4q_u8(uint8x16_t a, uint8x16x4_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx4q_u8
return vqtbx4q_u8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x8_t test_vtbl1_p8(poly8x8_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl1_p8
return vtbl1_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbl1_p8(poly8x16_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl1_p8
return vqtbl1_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vtbl2_p8(poly8x8x2_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl2_p8
return vtbl2_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbl2_p8(poly8x16x2_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl2_p8
return vqtbl2_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vtbl3_p8(poly8x8x3_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl3_p8
return vtbl3_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbl3_p8(poly8x16x3_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl3_p8
return vqtbl3_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vtbl4_p8(poly8x8x4_t a, uint8x8_t b) {
// CHECK-LABEL: test_vtbl4_p8
return vtbl4_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbl4_p8(poly8x16x4_t a, uint8x8_t b) {
// CHECK-LABEL: test_vqtbl4_p8
return vqtbl4_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x16_t test_vqtbl1q_p8(poly8x16_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl1q_p8
return vqtbl1q_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x16_t test_vqtbl2q_p8(poly8x16x2_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl2q_p8
return vqtbl2q_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x16_t test_vqtbl3q_p8(poly8x16x3_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl3q_p8
return vqtbl3q_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x16_t test_vqtbl4q_p8(poly8x16x4_t a, uint8x16_t b) {
// CHECK-LABEL: test_vqtbl4q_p8
return vqtbl4q_p8(a, b);
// CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbl {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x8_t test_vtbx1_p8(poly8x8_t a, poly8x8_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx1_p8
return vtbx1_p8(a, b, c);
// CHECK: movi {{v[0-9]+}}.8b, #0
// CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #0
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
// CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
// CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
}
@ -395,15 +398,15 @@ poly8x8_t test_vtbx1_p8(poly8x8_t a, poly8x8_t b, uint8x8_t c) {
poly8x8_t test_vtbx2_p8(poly8x8_t a, poly8x8x2_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx2_p8
return vtbx2_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vtbx3_p8(poly8x8_t a, poly8x8x3_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx3_p8
return vtbx3_p8(a, b, c);
// CHECK: movi {{v[0-9]+}}.8b, #0
// CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #0
// CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
// CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbl {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
// CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
// CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
}
@ -411,53 +414,53 @@ poly8x8_t test_vtbx3_p8(poly8x8_t a, poly8x8x3_t b, uint8x8_t c) {
poly8x8_t test_vtbx4_p8(poly8x8_t a, poly8x8x4_t b, uint8x8_t c) {
// CHECK-LABEL: test_vtbx4_p8
return vtbx4_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbx1_p8(poly8x8_t a, uint8x16_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx1_p8
return vqtbx1_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbx2_p8(poly8x8_t a, poly8x16x2_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx2_p8
return vqtbx2_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbx3_p8(poly8x8_t a, poly8x16x3_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx3_p8
return vqtbx3_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x8_t test_vqtbx4_p8(poly8x8_t a, poly8x16x4_t b, uint8x8_t c) {
// CHECK-LABEL: test_vqtbx4_p8
return vqtbx4_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
// CHECK: tbx {{v[0-9]+}}.8b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.8b
}
poly8x16_t test_vqtbx1q_p8(poly8x16_t a, uint8x16_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx1q_p8
return vqtbx1q_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x16_t test_vqtbx2q_p8(poly8x16_t a, poly8x16x2_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx2q_p8
return vqtbx2q_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x16_t test_vqtbx3q_p8(poly8x16_t a, poly8x16x3_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx3q_p8
return vqtbx3q_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}
poly8x16_t test_vqtbx4q_p8(poly8x16_t a, poly8x16x4_t b, uint8x16_t c) {
// CHECK-LABEL: test_vqtbx4q_p8
return vqtbx4q_p8(a, b, c);
// CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
// CHECK: tbx {{v[0-9]+}}.16b, {{{ ?v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b ?}}}, {{v[0-9]+}}.16b
}

View File

@ -1,6 +1,8 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -S -O3 -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types

View File

@ -1,176 +1,193 @@
// REQUIRES: aarch64-registered-target
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s --check-prefix CHECK-COMMON --check-prefix CHECK-AARCH64
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s --check-prefix CHECK-COMMON --check-prefix CHECK-ARM64
// Test new aarch64 intrinsics and types
#include <arm_neon.h>
int8x8_t test_vget_high_s8(int8x16_t a) {
// CHECK-LABEL: test_vget_high_s8:
// CHECK-COMMON-LABEL: test_vget_high_s8:
return vget_high_s8(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
int16x4_t test_vget_high_s16(int16x8_t a) {
// CHECK-LABEL: test_vget_high_s16:
// CHECK-COMMON-LABEL: test_vget_high_s16:
return vget_high_s16(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
int32x2_t test_vget_high_s32(int32x4_t a) {
// CHECK-LABEL: test_vget_high_s32:
// CHECK-COMMON-LABEL: test_vget_high_s32:
return vget_high_s32(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
int64x1_t test_vget_high_s64(int64x2_t a) {
// CHECK-LABEL: test_vget_high_s64:
// CHECK-COMMON-LABEL: test_vget_high_s64:
return vget_high_s64(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
uint8x8_t test_vget_high_u8(uint8x16_t a) {
// CHECK-LABEL: test_vget_high_u8:
// CHECK-COMMON-LABEL: test_vget_high_u8:
return vget_high_u8(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
uint16x4_t test_vget_high_u16(uint16x8_t a) {
// CHECK-LABEL: test_vget_high_u16:
// CHECK-COMMON-LABEL: test_vget_high_u16:
return vget_high_u16(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
uint32x2_t test_vget_high_u32(uint32x4_t a) {
// CHECK-LABEL: test_vget_high_u32:
// CHECK-COMMON-LABEL: test_vget_high_u32:
return vget_high_u32(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
uint64x1_t test_vget_high_u64(uint64x2_t a) {
// CHECK-LABEL: test_vget_high_u64:
// CHECK-COMMON-LABEL: test_vget_high_u64:
return vget_high_u64(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
poly64x1_t test_vget_high_p64(poly64x2_t a) {
// CHECK-LABEL: test_vget_high_p64:
// CHECK-COMMON-LABEL: test_vget_high_p64:
return vget_high_p64(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
float16x4_t test_vget_high_f16(float16x8_t a) {
// CHECK-LABEL: test_vget_high_f16:
// CHECK-COMMON-LABEL: test_vget_high_f16:
return vget_high_f16(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
float32x2_t test_vget_high_f32(float32x4_t a) {
// CHECK-LABEL: test_vget_high_f32:
// CHECK-COMMON-LABEL: test_vget_high_f32:
return vget_high_f32(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
poly8x8_t test_vget_high_p8(poly8x16_t a) {
// CHECK-LABEL: test_vget_high_p8:
// CHECK-COMMON-LABEL: test_vget_high_p8:
return vget_high_p8(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
poly16x4_t test_vget_high_p16(poly16x8_t a) {
// CHECK-LABEL: test_vget_high_p16
// CHECK-COMMON-LABEL: test_vget_high_p16
return vget_high_p16(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
float64x1_t test_vget_high_f64(float64x2_t a) {
// CHECK-LABEL: test_vget_high_f64
// CHECK-COMMON-LABEL: test_vget_high_f64
return vget_high_f64(a);
// CHECK: dup d0, {{v[0-9]+}}.d[1]
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}
int8x8_t test_vget_low_s8(int8x16_t a) {
// CHECK-LABEL: test_vget_low_s8:
// CHECK-COMMON-LABEL: test_vget_low_s8:
return vget_low_s8(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
int16x4_t test_vget_low_s16(int16x8_t a) {
// CHECK-LABEL: test_vget_low_s16:
// CHECK-COMMON-LABEL: test_vget_low_s16:
return vget_low_s16(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
int32x2_t test_vget_low_s32(int32x4_t a) {
// CHECK-LABEL: test_vget_low_s32:
// CHECK-COMMON-LABEL: test_vget_low_s32:
return vget_low_s32(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
int64x1_t test_vget_low_s64(int64x2_t a) {
// CHECK-LABEL: test_vget_low_s64:
// CHECK-COMMON-LABEL: test_vget_low_s64:
return vget_low_s64(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
uint8x8_t test_vget_low_u8(uint8x16_t a) {
// CHECK-LABEL: test_vget_low_u8:
// CHECK-COMMON-LABEL: test_vget_low_u8:
return vget_low_u8(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
uint16x4_t test_vget_low_u16(uint16x8_t a) {
// CHECK-LABEL: test_vget_low_u16:
// CHECK-COMMON-LABEL: test_vget_low_u16:
return vget_low_u16(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
uint32x2_t test_vget_low_u32(uint32x4_t a) {
// CHECK-LABEL: test_vget_low_u32:
// CHECK-COMMON-LABEL: test_vget_low_u32:
return vget_low_u32(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
uint64x1_t test_vget_low_u64(uint64x2_t a) {
// CHECK-LABEL: test_vget_low_u64:
// CHECK-COMMON-LABEL: test_vget_low_u64:
return vget_low_u64(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
poly64x1_t test_vget_low_p64(poly64x2_t a) {
// CHECK-LABEL: test_vget_low_p64:
// CHECK-COMMON-LABEL: test_vget_low_p64:
return vget_low_p64(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
float16x4_t test_vget_low_f16(float16x8_t a) {
// CHECK-LABEL: test_vget_low_f16:
// CHECK-COMMON-LABEL: test_vget_low_f16:
return vget_low_f16(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
float32x2_t test_vget_low_f32(float32x4_t a) {
// CHECK-LABEL: test_vget_low_f32:
// CHECK-COMMON-LABEL: test_vget_low_f32:
return vget_low_f32(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
poly8x8_t test_vget_low_p8(poly8x16_t a) {
// CHECK-LABEL: test_vget_low_p8:
// CHECK-COMMON-LABEL: test_vget_low_p8:
return vget_low_p8(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
poly16x4_t test_vget_low_p16(poly16x8_t a) {
// CHECK-LABEL: test_vget_low_p16:
// CHECK-COMMON-LABEL: test_vget_low_p16:
return vget_low_p16(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
float64x1_t test_vget_low_f64(float64x2_t a) {
// CHECK-LABEL: test_vget_low_f64:
// CHECK-COMMON-LABEL: test_vget_low_f64:
return vget_low_f64(a);
// CHECK-NEXT: ret
// CHECK-COMMON-NEXT: ret
}
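Where the two backends genuinely diverge, this file switches to split check prefixes instead of regex alternation: each RUN line names the prefixes FileCheck should enforce on that invocation, so shared expectations live under CHECK-COMMON while the differing vget_high lowerings sit under CHECK-AARCH64 (dup d0, ...) and CHECK-ARM64 (ext v0.16b, ...). A compact, hypothetical sketch of the mechanism, reusing the RUN flags from this file:

// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN:   -S -O3 -o - %s | FileCheck %s --check-prefix CHECK-COMMON --check-prefix CHECK-AARCH64
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -S -O3 -o - %s \
// RUN:   | FileCheck %s --check-prefix CHECK-COMMON --check-prefix CHECK-ARM64
#include <arm_neon.h>

int8x8_t demo_get_high(int8x16_t a) {
// Enforced on both runs:
// CHECK-COMMON-LABEL: demo_get_high
  return vget_high_s8(a);
// Each of these is only enforced on the run that requested its prefix:
// CHECK-AARCH64: dup d0, {{v[0-9]+}}.d[1]
// CHECK-ARM64: ext v0.16b, v0.16b, v0.16b, #8
}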

View File

@ -1,5 +1,6 @@
// RUN: %clang_cc1 -triple aarch64 -emit-llvm -o - %s | FileCheck -check-prefix=CHECK --check-prefix=CHECK-LE %s
// RUN: %clang_cc1 -triple aarch64_be -emit-llvm -o - %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-BE %s
// RUN: %clang_cc1 -triple arm64-linux-gnu -emit-llvm -o - %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-LE %s
#include <stdarg.h>

View File

@ -12,7 +12,15 @@
// RUN: -ffreestanding \
// RUN: -emit-llvm -w -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-apple-darwin9 \
// RUN: -ffreestanding \
// RUN: -emit-llvm -w -o - %s | FileCheck -check-prefix=CHECK64 %s
#ifdef __arm64__
#include <arm_neon.h>
#else
#include <arm_neon.h>
#endif
struct homogeneous_struct {
float f[2];
@ -20,6 +28,7 @@ struct homogeneous_struct {
float f4;
};
// CHECK: define arm_aapcs_vfpcc %struct.homogeneous_struct @test_struct(float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
// CHECK64: define %struct.homogeneous_struct @test_struct(float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
extern struct homogeneous_struct struct_callee(struct homogeneous_struct);
struct homogeneous_struct test_struct(struct homogeneous_struct arg) {
return struct_callee(arg);
@ -34,6 +43,7 @@ struct nested_array {
double d[4];
};
// CHECK: define arm_aapcs_vfpcc void @test_array(double %{{.*}}, double %{{.*}}, double %{{.*}}, double %{{.*}})
// CHECK64: define void @test_array(double %{{.*}}, double %{{.*}}, double %{{.*}}, double %{{.*}})
extern void array_callee(struct nested_array);
void test_array(struct nested_array arg) {
array_callee(arg);
@ -41,6 +51,7 @@ void test_array(struct nested_array arg) {
extern void complex_callee(__complex__ double);
// CHECK: define arm_aapcs_vfpcc void @test_complex(double %{{.*}}, double %{{.*}})
// CHECK64: define void @test_complex(double %{{.*}}, double %{{.*}})
void test_complex(__complex__ double cd) {
complex_callee(cd);
}
@ -62,6 +73,9 @@ struct big_struct {
float f4;
};
// CHECK: define arm_aapcs_vfpcc void @test_big([5 x i32] %{{.*}})
// CHECK64: define void @test_big(%struct.big_struct* %{{.*}})
// CHECK64: call void @llvm.memcpy
// CHECK64: call void @big_callee(%struct.big_struct*
extern void big_callee(struct big_struct);
void test_big(struct big_struct arg) {
big_callee(arg);
@ -75,6 +89,7 @@ struct heterogeneous_struct {
int i2;
};
// CHECK: define arm_aapcs_vfpcc void @test_hetero([2 x i32] %{{.*}})
// CHECK64: define void @test_hetero(i64 %{{.*}})
extern void hetero_callee(struct heterogeneous_struct);
void test_hetero(struct heterogeneous_struct arg) {
hetero_callee(arg);
@ -82,6 +97,7 @@ void test_hetero(struct heterogeneous_struct arg) {
// Neon multi-vector types are homogeneous aggregates.
// CHECK: define arm_aapcs_vfpcc <16 x i8> @f0(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK64: define <16 x i8> @f0(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
int8x16_t f0(int8x16x4_t v4) {
return vaddq_s8(v4.val[0], v4.val[3]);
}
@ -95,6 +111,7 @@ struct neon_struct {
int16x4_t v4;
};
// CHECK: define arm_aapcs_vfpcc void @test_neon(<8 x i8> %{{.*}}, <8 x i8> %{{.*}}, <2 x i32> %{{.*}}, <4 x i16> %{{.*}})
// CHECK64: define void @test_neon(<8 x i8> %{{.*}}, <8 x i8> %{{.*}}, <2 x i32> %{{.*}}, <4 x i16> %{{.*}})
extern void neon_callee(struct neon_struct);
void test_neon(struct neon_struct arg) {
neon_callee(arg);

View File

@ -1,6 +1,11 @@
// REQUIRES: arm-registered-target
// RUN: %clang_cc1 -triple armv7---eabi -target-abi aapcs -mfloat-abi hard -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple arm64-apple-darwin9 -target-abi darwinpcs \
// RUN: -ffreestanding -emit-llvm -w -o - %s | FileCheck -check-prefix=CHECK64 %s
// RUN: %clang_cc1 -triple arm64-linux-gnu -ffreestanding -emit-llvm -w -o - %s \
// RUN: | FileCheck --check-prefix=CHECK64-AAPCS %s
typedef long long int64_t;
typedef unsigned int uint32_t;
@ -170,6 +175,10 @@ struct_of_four_doubles g_s4d;
void test_struct_of_four_doubles(void) {
// CHECK: test_struct_of_four_doubles
// CHECK: call arm_aapcs_vfpcc void @takes_struct_of_four_doubles(double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, [6 x float] undef, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}})
// CHECK64: test_struct_of_four_doubles
// CHECK64: call void @takes_struct_of_four_doubles(double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, [3 x float] undef, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}})
// CHECK64-AAPCS: test_struct_of_four_doubles
// CHECK64-AAPCS: call void @takes_struct_of_four_doubles(double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, [3 x float] undef, [4 x double] {{.*}}, double {{.*}})
takes_struct_of_four_doubles(3.0, g_s4d, g_s4d, 4.0);
}
@ -202,6 +211,10 @@ struct_of_vecs g_vec;
void test_struct_of_vecs(void) {
// CHECK: test_struct_of_vecs
// CHECK: call arm_aapcs_vfpcc void @takes_struct_of_vecs(double {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, [6 x float] undef, <8 x i8> {{.*}}, <4 x i16> {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, double {{.*}})
// CHECK64: test_struct_of_vecs
// CHECK64: call void @takes_struct_of_vecs(double {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, [3 x float] undef, <8 x i8> {{.*}}, <4 x i16> {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, double {{.*}})
// CHECK64-AAPCS: test_struct_of_vecs
// CHECK64-AAPCS: call void @takes_struct_of_vecs(double {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, [3 x float] undef, [4 x double] {{.*}})
takes_struct_of_vecs(3.0, g_vec, g_vec, 4.0);
}

View File

@ -0,0 +1,430 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s
#include <stdarg.h>
typedef __attribute__(( ext_vector_type(3) )) char __char3;
typedef __attribute__(( ext_vector_type(4) )) char __char4;
typedef __attribute__(( ext_vector_type(5) )) char __char5;
typedef __attribute__(( ext_vector_type(9) )) char __char9;
typedef __attribute__(( ext_vector_type(19) )) char __char19;
typedef __attribute__(( ext_vector_type(3) )) short __short3;
typedef __attribute__(( ext_vector_type(5) )) short __short5;
typedef __attribute__(( ext_vector_type(3) )) int __int3;
typedef __attribute__(( ext_vector_type(5) )) int __int5;
typedef __attribute__(( ext_vector_type(3) )) double __double3;
double varargs_vec_3c(int fixed, ...) {
// CHECK: varargs_vec_3c
// CHECK: alloca <3 x i8>, align 4
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__char3 c3 = va_arg(ap, __char3);
sum = sum + c3.x + c3.y;
va_end(ap);
return sum;
}
double test_3c(__char3 *in) {
// CHECK: test_3c
// CHECK: call double (i32, ...)* @varargs_vec_3c(i32 3, i32 {{%.*}})
return varargs_vec_3c(3, *in);
}
double varargs_vec_4c(int fixed, ...) {
// CHECK: varargs_vec_4c
// CHECK: alloca <4 x i8>, align 4
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <4 x i8>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__char4 c4 = va_arg(ap, __char4);
sum = sum + c4.x + c4.y;
va_end(ap);
return sum;
}
double test_4c(__char4 *in) {
// CHECK: test_4c
// CHECK: call double (i32, ...)* @varargs_vec_4c(i32 4, i32 {{%.*}})
return varargs_vec_4c(4, *in);
}
double varargs_vec_5c(int fixed, ...) {
// CHECK: varargs_vec_5c
// CHECK: alloca <5 x i8>, align 8
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__char5 c5 = va_arg(ap, __char5);
sum = sum + c5.x + c5.y;
va_end(ap);
return sum;
}
double test_5c(__char5 *in) {
// CHECK: test_5c
// CHECK: call double (i32, ...)* @varargs_vec_5c(i32 5, <2 x i32> {{%.*}})
return varargs_vec_5c(5, *in);
}
double varargs_vec_9c(int fixed, ...) {
// CHECK: varargs_vec_9c
// CHECK: alloca <9 x i8>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__char9 c9 = va_arg(ap, __char9);
sum = sum + c9.x + c9.y;
va_end(ap);
return sum;
}
double test_9c(__char9 *in) {
// CHECK: test_9c
// CHECK: call double (i32, ...)* @varargs_vec_9c(i32 9, <4 x i32> {{%.*}})
return varargs_vec_9c(9, *in);
}
double varargs_vec_19c(int fixed, ...) {
// CHECK: varargs_vec_19c
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <19 x i8>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__char19 c19 = va_arg(ap, __char19);
sum = sum + c19.x + c19.y;
va_end(ap);
return sum;
}
double test_19c(__char19 *in) {
// CHECK: test_19c
// CHECK: call double (i32, ...)* @varargs_vec_19c(i32 19, <19 x i8>* {{%.*}})
return varargs_vec_19c(19, *in);
}
double varargs_vec_3s(int fixed, ...) {
// CHECK: varargs_vec_3s
// CHECK: alloca <3 x i16>, align 8
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__short3 c3 = va_arg(ap, __short3);
sum = sum + c3.x + c3.y;
va_end(ap);
return sum;
}
double test_3s(__short3 *in) {
// CHECK: test_3s
// CHECK: call double (i32, ...)* @varargs_vec_3s(i32 3, <2 x i32> {{%.*}})
return varargs_vec_3s(3, *in);
}
double varargs_vec_5s(int fixed, ...) {
// CHECK: varargs_vec_5s
// CHECK: alloca <5 x i16>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__short5 c5 = va_arg(ap, __short5);
sum = sum + c5.x + c5.y;
va_end(ap);
return sum;
}
double test_5s(__short5 *in) {
// CHECK: test_5s
// CHECK: call double (i32, ...)* @varargs_vec_5s(i32 5, <4 x i32> {{%.*}})
return varargs_vec_5s(5, *in);
}
double varargs_vec_3i(int fixed, ...) {
// CHECK: varargs_vec_3i
// CHECK: alloca <3 x i32>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__int3 c3 = va_arg(ap, __int3);
sum = sum + c3.x + c3.y;
va_end(ap);
return sum;
}
double test_3i(__int3 *in) {
// CHECK: test_3i
// CHECK: call double (i32, ...)* @varargs_vec_3i(i32 3, <4 x i32> {{%.*}})
return varargs_vec_3i(3, *in);
}
double varargs_vec_5i(int fixed, ...) {
// CHECK: varargs_vec_5i
// CHECK: alloca <5 x i32>, align 16
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <5 x i32>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__int5 c5 = va_arg(ap, __int5);
sum = sum + c5.x + c5.y;
va_end(ap);
return sum;
}
double test_5i(__int5 *in) {
// CHECK: test_5i
// CHECK: call double (i32, ...)* @varargs_vec_5i(i32 5, <5 x i32>* {{%.*}})
return varargs_vec_5i(5, *in);
}
double varargs_vec_3d(int fixed, ...) {
// CHECK: varargs_vec_3d
// CHECK: alloca <3 x double>, align 16
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <3 x double>*
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__double3 c3 = va_arg(ap, __double3);
sum = sum + c3.x + c3.y;
va_end(ap);
return sum;
}
double test_3d(__double3 *in) {
// CHECK: test_3d
// CHECK: call double (i32, ...)* @varargs_vec_3d(i32 3, <3 x double>* {{%.*}})
return varargs_vec_3d(3, *in);
}
double varargs_vec(int fixed, ...) {
// CHECK: varargs_vec
va_list ap;
double sum = fixed;
va_start(ap, fixed);
__char3 c3 = va_arg(ap, __char3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
sum = sum + c3.x + c3.y;
__char5 c5 = va_arg(ap, __char5);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
sum = sum + c5.x + c5.y;
__char9 c9 = va_arg(ap, __char9);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
sum = sum + c9.x + c9.y;
__char19 c19 = va_arg(ap, __char19);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <19 x i8>*
sum = sum + c19.x + c19.y;
__short3 s3 = va_arg(ap, __short3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
sum = sum + s3.x + s3.y;
__short5 s5 = va_arg(ap, __short5);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
sum = sum + s5.x + s5.y;
__int3 i3 = va_arg(ap, __int3);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
sum = sum + i3.x + i3.y;
__int5 i5 = va_arg(ap, __int5);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <5 x i32>*
sum = sum + i5.x + i5.y;
__double3 d3 = va_arg(ap, __double3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <3 x double>*
sum = sum + d3.x + d3.y;
va_end(ap);
return sum;
}
double test(__char3 *c3, __char5 *c5, __char9 *c9, __char19 *c19,
__short3 *s3, __short5 *s5, __int3 *i3, __int5 *i5,
__double3 *d3) {
double ret = varargs_vec(3, *c3, *c5, *c9, *c19, *s3, *s5, *i3, *i5, *d3);
// CHECK: call double (i32, ...)* @varargs_vec(i32 3, i32 {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <19 x i8>* {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <5 x i32>* {{%.*}}, <3 x double>* {{%.*}})
return ret;
}
__attribute__((noinline)) double args_vec_3c(int fixed, __char3 c3) {
// CHECK: args_vec_3c
// CHECK: [[C3:%.*]] = alloca <3 x i8>, align 4
// CHECK: [[TMP:%.*]] = bitcast <3 x i8>* [[C3]] to i32*
// CHECK: store i32 {{%.*}}, i32* [[TMP]]
double sum = fixed;
sum = sum + c3.x + c3.y;
return sum;
}
double fixed_3c(__char3 *in) {
// CHECK: fixed_3c
// CHECK: call double @args_vec_3c(i32 3, i32 {{%.*}})
return args_vec_3c(3, *in);
}
__attribute__((noinline)) double args_vec_5c(int fixed, __char5 c5) {
// CHECK: args_vec_5c
// CHECK: [[C5:%.*]] = alloca <5 x i8>, align 8
// CHECK: [[TMP:%.*]] = bitcast <5 x i8>* [[C5]] to <2 x i32>*
// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 1
double sum = fixed;
sum = sum + c5.x + c5.y;
return sum;
}
double fixed_5c(__char5 *in) {
// CHECK: fixed_5c
// CHECK: call double @args_vec_5c(i32 5, <2 x i32> {{%.*}})
return args_vec_5c(5, *in);
}
__attribute__((noinline)) double args_vec_9c(int fixed, __char9 c9) {
// CHECK: args_vec_9c
// CHECK: [[C9:%.*]] = alloca <9 x i8>, align 16
// CHECK: [[TMP:%.*]] = bitcast <9 x i8>* [[C9]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
double sum = fixed;
sum = sum + c9.x + c9.y;
return sum;
}
double fixed_9c(__char9 *in) {
// CHECK: fixed_9c
// CHECK: call double @args_vec_9c(i32 9, <4 x i32> {{%.*}})
return args_vec_9c(9, *in);
}
__attribute__((noinline)) double args_vec_19c(int fixed, __char19 c19) {
// CHECK: args_vec_19c
// CHECK: [[C19:%.*]] = load <19 x i8>* {{.*}}, align 16
double sum = fixed;
sum = sum + c19.x + c19.y;
return sum;
}
double fixed_19c(__char19 *in) {
// CHECK: fixed_19c
// CHECK: call double @args_vec_19c(i32 19, <19 x i8>* {{%.*}})
return args_vec_19c(19, *in);
}
__attribute__((noinline)) double args_vec_3s(int fixed, __short3 c3) {
// CHECK: args_vec_3s
// CHECK: [[C3:%.*]] = alloca <3 x i16>, align 8
// CHECK: [[TMP:%.*]] = bitcast <3 x i16>* [[C3]] to <2 x i32>*
// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 1
double sum = fixed;
sum = sum + c3.x + c3.y;
return sum;
}
double fixed_3s(__short3 *in) {
// CHECK: fixed_3s
// CHECK: call double @args_vec_3s(i32 3, <2 x i32> {{%.*}})
return args_vec_3s(3, *in);
}
__attribute__((noinline)) double args_vec_5s(int fixed, __short5 c5) {
// CHECK: args_vec_5s
// CHECK: [[C5:%.*]] = alloca <5 x i16>, align 16
// CHECK: [[TMP:%.*]] = bitcast <5 x i16>* [[C5]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
double sum = fixed;
sum = sum + c5.x + c5.y;
return sum;
}
double fixed_5s(__short5 *in) {
// CHECK: fixed_5s
// CHECK: call double @args_vec_5s(i32 5, <4 x i32> {{%.*}})
return args_vec_5s(5, *in);
}
__attribute__((noinline)) double args_vec_3i(int fixed, __int3 c3) {
// CHECK: args_vec_3i
// CHECK: [[C3:%.*]] = alloca <3 x i32>, align 16
// CHECK: [[TMP:%.*]] = bitcast <3 x i32>* [[C3]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
double sum = fixed;
sum = sum + c3.x + c3.y;
return sum;
}
double fixed_3i(__int3 *in) {
// CHECK: fixed_3i
// CHECK: call double @args_vec_3i(i32 3, <4 x i32> {{%.*}})
return args_vec_3i(3, *in);
}
__attribute__((noinline)) double args_vec_5i(int fixed, __int5 c5) {
// CHECK: args_vec_5i
// CHECK: [[C5:%.*]] = load <5 x i32>* {{%.*}}, align 16
double sum = fixed;
sum = sum + c5.x + c5.y;
return sum;
}
double fixed_5i(__int5 *in) {
// CHECK: fixed_5i
// CHECK: call double @args_vec_5i(i32 5, <5 x i32>* {{%.*}})
return args_vec_5i(5, *in);
}
__attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) {
// CHECK: args_vec_3d
// CHECK: [[CAST:%.*]] = bitcast <3 x double>* {{%.*}} to <4 x double>*
// CHECK: [[LOAD:%.*]] = load <4 x double>* [[CAST]]
// CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> undef, <3 x i32> <i32 0, i32 1, i32 2>
double sum = fixed;
sum = sum + c3.x + c3.y;
return sum;
}
double fixed_3d(__double3 *in) {
// CHECK: fixed_3d
// CHECK: call double @args_vec_3d(i32 3, <3 x double>* {{%.*}})
return args_vec_3d(3, *in);
}

View File

@ -0,0 +1,713 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s
// CHECK: define signext i8 @f0()
char f0(void) {
return 0;
}
// Struct as return type. Aggregates <= 16 bytes are returned directly, with
// the size rounded up to a multiple of 8 bytes.
// CHECK: define i64 @f1()
struct s1 { char f0; };
struct s1 f1(void) {}
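// A sketch, not part of the original test (s1b/f1b are made-up names): by the
// rule above, a 12-byte aggregate should be rounded up and returned in two
// 64-bit registers, roughly "define [2 x i64] @f1b()".
struct s1b { int a, b, c; };
struct s1b f1b(void) {}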
// CHECK: define i64 @f2()
struct s2 { short f0; };
struct s2 f2(void) {}
// CHECK: define i64 @f3()
struct s3 { int f0; };
struct s3 f3(void) {}
// CHECK: define i64 @f4()
struct s4 { struct s4_0 { int f0; } f0; };
struct s4 f4(void) {}
// CHECK: define i64 @f5()
struct s5 { struct { } f0; int f1; };
struct s5 f5(void) {}
// CHECK: define i64 @f6()
struct s6 { int f0[1]; };
struct s6 f6(void) {}
// CHECK: define void @f7()
struct s7 { struct { int : 0; } f0; };
struct s7 f7(void) {}
// CHECK: define void @f8()
struct s8 { struct { int : 0; } f0[1]; };
struct s8 f8(void) {}
// CHECK: define i64 @f9()
struct s9 { int f0; int : 0; };
struct s9 f9(void) {}
// CHECK: define i64 @f10()
struct s10 { int f0; int : 0; int : 0; };
struct s10 f10(void) {}
// CHECK: define i64 @f11()
struct s11 { int : 0; int f0; };
struct s11 f11(void) {}
// CHECK: define i64 @f12()
union u12 { char f0; short f1; int f2; };
union u12 f12(void) {}
// A Homogeneous Aggregate as return type is returned directly.
// CHECK: define %struct.s13 @f13()
struct s13 { float f0; };
struct s13 f13(void) {}
// CHECK: define %union.u14 @f14()
union u14 { float f0; };
union u14 f14(void) {}
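// A sketch, not part of the original test (s14b/f14b are made-up names): a
// four-float HFA should likewise be returned directly as its struct type,
// roughly "define %struct.s14b @f14b()".
struct s14b { float a, b, c, d; };
struct s14b f14b(void) {}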
// CHECK: define void @f15()
void f15(struct s7 a0) {}
// CHECK: define void @f16()
void f16(struct s8 a0) {}
// CHECK: define i64 @f17()
struct s17 { short f0 : 13; char f1 : 4; };
struct s17 f17(void) {}
// CHECK: define i64 @f18()
struct s18 { short f0; char f1 : 4; };
struct s18 f18(void) {}
// CHECK: define i64 @f19()
struct s19 { int f0; struct s8 f1; };
struct s19 f19(void) {}
// CHECK: define i64 @f20()
struct s20 { struct s8 f1; int f0; };
struct s20 f20(void) {}
// CHECK: define i64 @f21()
struct s21 { struct {} f1; int f0 : 4; };
struct s21 f21(void) {}
// CHECK: define i64 @f22()
// CHECK: define i64 @f23()
// CHECK: define i64 @f24()
// CHECK: define i128 @f25()
// CHECK: define { float, float } @f26()
// CHECK: define { double, double } @f27()
_Complex char f22(void) {}
_Complex short f23(void) {}
_Complex int f24(void) {}
_Complex long long f25(void) {}
_Complex float f26(void) {}
_Complex double f27(void) {}
// CHECK: define i64 @f28()
struct s28 { _Complex char f0; };
struct s28 f28() {}
// CHECK: define i64 @f29()
struct s29 { _Complex short f0; };
struct s29 f29() {}
// CHECK: define i64 @f30()
struct s30 { _Complex int f0; };
struct s30 f30() {}
struct s31 { char x; };
void f31(struct s31 s) { }
// CHECK: define void @f31(i64 %s.coerce)
// CHECK: %s = alloca %struct.s31, align 8
// CHECK: trunc i64 %s.coerce to i8
// CHECK: store i8 %{{.*}},
struct s32 { double x; };
void f32(struct s32 s) { }
// Expand Homogeneous Aggregate.
// CHECK: @f32(double %{{.*}})
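// A sketch, not part of the original test (s32b/f32b are made-up names): a
// two-element HFA would be expanded the same way, roughly
// "@f32b(double %{{.*}}, double %{{.*}})".
struct s32b { double x, y; };
void f32b(struct s32b s) { }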
// A composite type larger than 16 bytes should be passed indirectly.
struct s33 { char buf[32*32]; };
void f33(struct s33 s) { }
// CHECK: define void @f33(%struct.s33* %s)
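// A sketch, not part of the original test (g33 is a made-up name): on the
// caller side we would expect the argument to be copied into a local
// temporary (via @llvm.memcpy) and its address passed, i.e. roughly
// "call void @f33(%struct.s33* %{{.*}})".
void g33(struct s33 *s) { f33(*s); }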
struct s34 { char c; };
void f34(struct s34 s);
void g34(struct s34 *s) { f34(*s); }
// CHECK: @g34(%struct.s34* %s)
// CHECK: %[[a:.*]] = load i8* %{{.*}}
// CHECK: zext i8 %[[a]] to i64
// CHECK: call void @f34(i64 %{{.*}})
/*
* Check that va_arg accesses stack according to ABI alignment
*/
long long t1(int i, ...) {
// CHECK: t1
__builtin_va_list ap;
__builtin_va_start(ap, i);
// CHECK-NOT: add i32 %{{.*}} 7
// CHECK-NOT: and i32 %{{.*}} -8
long long ll = __builtin_va_arg(ap, long long);
__builtin_va_end(ap);
return ll;
}
double t2(int i, ...) {
// CHECK: t2
__builtin_va_list ap;
__builtin_va_start(ap, i);
// CHECK-NOT: add i32 %{{.*}} 7
// CHECK-NOT: and i32 %{{.*}} -8
double ll = __builtin_va_arg(ap, double);
__builtin_va_end(ap);
return ll;
}
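/*
 * A sketch, not part of the original test (t3 is a made-up name): a 16-byte
 * scalar such as __int128 should instead force the va_list pointer up to a
 * 16-byte boundary (an "and i64 {{%.*}}, -16" style sequence) before the
 * value is loaded.
 */
__int128 t3(int i, ...) {
  __builtin_va_list ap;
  __builtin_va_start(ap, i);
  __int128 ll = __builtin_va_arg(ap, __int128);
  __builtin_va_end(ap);
  return ll;
}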
#include <arm_neon.h>
// Homogeneous Vector Aggregate as return type and argument type.
// CHECK: define %struct.int8x16x2_t @f0_0(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
int8x16x2_t f0_0(int8x16_t a0, int8x16_t a1) {
return vzipq_s8(a0, a1);
}
// Test direct vector passing.
typedef float T_float32x2 __attribute__ ((__vector_size__ (8)));
typedef float T_float32x4 __attribute__ ((__vector_size__ (16)));
typedef float T_float32x8 __attribute__ ((__vector_size__ (32)));
typedef float T_float32x16 __attribute__ ((__vector_size__ (64)));
// CHECK: define <2 x float> @f1_0(<2 x float> %{{.*}})
T_float32x2 f1_0(T_float32x2 a0) { return a0; }
// CHECK: define <4 x float> @f1_1(<4 x float> %{{.*}})
T_float32x4 f1_1(T_float32x4 a0) { return a0; }
// A vector larger than 16 bytes is illegal and is passed indirectly.
// CHECK: define void @f1_2(<8 x float>* noalias sret %{{.*}}, <8 x float>*)
T_float32x8 f1_2(T_float32x8 a0) { return a0; }
// CHECK: define void @f1_3(<16 x float>* noalias sret %{{.*}}, <16 x float>*)
T_float32x16 f1_3(T_float32x16 a0) { return a0; }
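// A sketch, not part of the original test (T_float64x2/f1_4 are made-up
// names): a 16-byte vector sits exactly at the limit and should still be
// passed and returned directly, roughly
// "define <2 x double> @f1_4(<2 x double> %{{.*}})".
typedef double T_float64x2 __attribute__ ((__vector_size__ (16)));
T_float64x2 f1_4(T_float64x2 a0) { return a0; }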
// Test alignment with aggregates: an HFA, aggregates with size <= 16 bytes,
// and aggregates with size > 16 bytes.
struct s35
{
  float v[4]; // Testing HFA.
} __attribute__((aligned(16)));
typedef struct s35 s35_with_align;
typedef __attribute__((neon_vector_type(4))) float float32x4_t;
float32x4_t f35(int i, s35_with_align s1, s35_with_align s2) {
// CHECK: define <4 x float> @f35(i32 %i, float %s1.0, float %s1.1, float %s1.2, float %s1.3, float %s2.0, float %s2.1, float %s2.2, float %s2.3)
// CHECK: %s1 = alloca %struct.s35, align 16
// CHECK: %s2 = alloca %struct.s35, align 16
// CHECK: %[[a:.*]] = bitcast %struct.s35* %s1 to <4 x float>*
// CHECK: load <4 x float>* %[[a]], align 16
// CHECK: %[[b:.*]] = bitcast %struct.s35* %s2 to <4 x float>*
// CHECK: load <4 x float>* %[[b]], align 16
float32x4_t v = vaddq_f32(*(float32x4_t *)&s1,
*(float32x4_t *)&s2);
return v;
}
struct s36
{
  int v[4]; // Testing 16-byte aggregate.
} __attribute__((aligned(16)));
typedef struct s36 s36_with_align;
typedef __attribute__((neon_vector_type(4))) int int32x4_t;
int32x4_t f36(int i, s36_with_align s1, s36_with_align s2) {
// CHECK: define <4 x i32> @f36(i32 %i, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s36, align 16
// CHECK: %s2 = alloca %struct.s36, align 16
// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
// CHECK: %[[a:.*]] = bitcast %struct.s36* %s1 to <4 x i32>*
// CHECK: load <4 x i32>* %[[a]], align 16
// CHECK: %[[b:.*]] = bitcast %struct.s36* %s2 to <4 x i32>*
// CHECK: load <4 x i32>* %[[b]], align 16
int32x4_t v = vaddq_s32(*(int32x4_t *)&s1,
*(int32x4_t *)&s2);
return v;
}
struct s37
{
  int v[18]; // Testing large aggregate.
} __attribute__((aligned(16)));
typedef struct s37 s37_with_align;
int32x4_t f37(int i, s37_with_align s1, s37_with_align s2) {
// CHECK: define <4 x i32> @f37(i32 %i, %struct.s37* %s1, %struct.s37* %s2)
// CHECK: %[[a:.*]] = bitcast %struct.s37* %s1 to <4 x i32>*
// CHECK: load <4 x i32>* %[[a]], align 16
// CHECK: %[[b:.*]] = bitcast %struct.s37* %s2 to <4 x i32>*
// CHECK: load <4 x i32>* %[[b]], align 16
int32x4_t v = vaddq_s32(*(int32x4_t *)&s1,
*(int32x4_t *)&s2);
return v;
}
s37_with_align g37;
int32x4_t caller37() {
// CHECK: caller37
// CHECK: %[[a:.*]] = alloca %struct.s37, align 16
// CHECK: %[[b:.*]] = alloca %struct.s37, align 16
// CHECK: call void @llvm.memcpy
// CHECK: call void @llvm.memcpy
// CHECK: call <4 x i32> @f37(i32 3, %struct.s37* %[[a]], %struct.s37* %[[b]])
return f37(3, g37, g37);
}
// rdar://problem/12648441
// Test passing structs with sizes < 8, <= 16, and > 16 bytes, both with and
// without a 16-byte alignment attribute.
// Structs with size <= 8 bytes and no alignment attribute are passed as i64,
// regardless of their natural alignment.
struct s38
{
int i;
short s;
};
typedef struct s38 s38_no_align;
// passing structs in registers
__attribute__ ((noinline))
int f38(int i, s38_no_align s1, s38_no_align s2) {
// CHECK: define i32 @f38(i32 %i, i64 %s1.coerce, i64 %s2.coerce)
// CHECK: %s1 = alloca %struct.s38, align 8
// CHECK: %s2 = alloca %struct.s38, align 8
// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 1
// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s38* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s38* %s2, i32 0, i32 1
return s1.i + s2.i + i + s1.s + s2.s;
}
s38_no_align g38;
s38_no_align g38_2;
int caller38() {
// CHECK: define i32 @caller38()
// CHECK: %[[a:.*]] = load i64* bitcast (%struct.s38* @g38 to i64*), align 1
// CHECK: %[[b:.*]] = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 1
// CHECK: call i32 @f38(i32 3, i64 %[[a]], i64 %[[b]])
return f38(3, g38, g38_2);
}
// passing structs on stack
__attribute__ ((noinline))
int f38_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s38_no_align s1, s38_no_align s2) {
// CHECK: define i32 @f38_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i64 %s1.coerce, i64 %s2.coerce)
// CHECK: %s1 = alloca %struct.s38, align 8
// CHECK: %s2 = alloca %struct.s38, align 8
// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 1
// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s38* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s38* %s2, i32 0, i32 1
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s;
}
int caller38_stack() {
// CHECK: define i32 @caller38_stack()
// CHECK: %[[a:.*]] = load i64* bitcast (%struct.s38* @g38 to i64*), align 1
// CHECK: %[[b:.*]] = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 1
// CHECK: call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i64 %[[a]], i64 %[[b]])
return f38_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g38, g38_2);
}
// structs with size <= 8 bytes, with alignment attribute
struct s39
{
int i;
short s;
} __attribute__((aligned(16)));
typedef struct s39 s39_with_align;
// passing aligned structs in registers
__attribute__ ((noinline))
int f39(int i, s39_with_align s1, s39_with_align s2) {
// CHECK: define i32 @f39(i32 %i, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s39, align 16
// CHECK: %s2 = alloca %struct.s39, align 16
// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s39* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s39* %s2, i32 0, i32 1
return s1.i + s2.i + i + s1.s + s2.s;
}
s39_with_align g39;
s39_with_align g39_2;
int caller39() {
// CHECK: define i32 @caller39()
// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s39* @g39 to i128*), align 1
// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 1
// CHECK: call i32 @f39(i32 3, i128 %[[a]], i128 %[[b]])
return f39(3, g39, g39_2);
}
// passing aligned structs on stack
__attribute__ ((noinline))
int f39_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s39_with_align s1, s39_with_align s2) {
// CHECK: define i32 @f39_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s39, align 16
// CHECK: %s2 = alloca %struct.s39, align 16
// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s39* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s39* %s2, i32 0, i32 1
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s;
}
int caller39_stack() {
// CHECK: define i32 @caller39_stack()
// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s39* @g39 to i128*), align 1
// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 1
// CHECK: call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]])
return f39_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g39, g39_2);
}
// structs with size <= 16 bytes, without alignment attribute
struct s40
{
int i;
short s;
int i2;
short s2;
};
typedef struct s40 s40_no_align;
// passing structs in registers
__attribute__ ((noinline))
int f40(int i, s40_no_align s1, s40_no_align s2) {
// CHECK: define i32 @f40(i32 %i, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce)
// CHECK: %s1 = alloca %struct.s40, align 8
// CHECK: %s2 = alloca %struct.s40, align 8
// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 1
// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s40* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s40* %s2, i32 0, i32 1
return s1.i + s2.i + i + s1.s + s2.s;
}
s40_no_align g40;
s40_no_align g40_2;
int caller40() {
// CHECK: define i32 @caller40()
// CHECK: %[[a:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1
// CHECK: %[[b:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1
// CHECK: call i32 @f40(i32 3, [2 x i64] %[[a]], [2 x i64] %[[b]])
return f40(3, g40, g40_2);
}
// passing structs on stack
__attribute__ ((noinline))
int f40_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s40_no_align s1, s40_no_align s2) {
// CHECK: define i32 @f40_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce)
// CHECK: %s1 = alloca %struct.s40, align 8
// CHECK: %s2 = alloca %struct.s40, align 8
// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 1
// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s40* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s40* %s2, i32 0, i32 1
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s;
}
int caller40_stack() {
// CHECK: define i32 @caller40_stack()
// CHECK: %[[a:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1
// CHECK: %[[b:.*]] = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1
// CHECK: call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, [2 x i64] %[[a]], [2 x i64] %[[b]])
return f40_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g40, g40_2);
}
// structs with size <= 16 bytes, with alignment attribute
struct s41
{
int i;
short s;
int i2;
short s2;
} __attribute__((aligned(16)));
typedef struct s41 s41_with_align;
// passing aligned structs in registers
__attribute__ ((noinline))
int f41(int i, s41_with_align s1, s41_with_align s2) {
// CHECK: define i32 @f41(i32 %i, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s41, align 16
// CHECK: %s2 = alloca %struct.s41, align 16
// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s41* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s41* %s2, i32 0, i32 1
return s1.i + s2.i + i + s1.s + s2.s;
}
s41_with_align g41;
s41_with_align g41_2;
int caller41() {
// CHECK: define i32 @caller41()
// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s41* @g41 to i128*), align 1
// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 1
// CHECK: call i32 @f41(i32 3, i128 %[[a]], i128 %[[b]])
return f41(3, g41, g41_2);
}
// passing aligned structs on stack
__attribute__ ((noinline))
int f41_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s41_with_align s1, s41_with_align s2) {
// CHECK: define i32 @f41_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s41, align 16
// CHECK: %s2 = alloca %struct.s41, align 16
// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
// CHECK: getelementptr inbounds %struct.s41* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s41* %s2, i32 0, i32 1
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s;
}
int caller41_stack() {
// CHECK: define i32 @caller41_stack()
// CHECK: %[[a:.*]] = load i128* bitcast (%struct.s41* @g41 to i128*), align 1
// CHECK: %[[b:.*]] = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 1
// CHECK: call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]])
return f41_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g41, g41_2);
}
// structs with size > 16 bytes, without alignment attribute
struct s42
{
int i;
short s;
int i2;
short s2;
int i3;
short s3;
};
typedef struct s42 s42_no_align;
// passing structs in registers
__attribute__ ((noinline))
int f42(int i, s42_no_align s1, s42_no_align s2) {
// CHECK: define i32 @f42(i32 %i, %struct.s42* %s1, %struct.s42* %s2)
// CHECK: getelementptr inbounds %struct.s42* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s42* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s42* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s42* %s2, i32 0, i32 1
return s1.i + s2.i + i + s1.s + s2.s;
}
s42_no_align g42;
s42_no_align g42_2;
int caller42() {
// CHECK: define i32 @caller42()
// CHECK: %[[a:.*]] = alloca %struct.s42, align 4
// CHECK: %[[b:.*]] = alloca %struct.s42, align 4
// CHECK: %[[c:.*]] = bitcast %struct.s42* %[[a]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: %[[d:.*]] = bitcast %struct.s42* %[[b]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: call i32 @f42(i32 3, %struct.s42* %[[a]], %struct.s42* %[[b]])
return f42(3, g42, g42_2);
}
// passing structs on stack
__attribute__ ((noinline))
int f42_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s42_no_align s1, s42_no_align s2) {
// CHECK: define i32 @f42_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, %struct.s42* %s1, %struct.s42* %s2)
// CHECK: getelementptr inbounds %struct.s42* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s42* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s42* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s42* %s2, i32 0, i32 1
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s;
}
int caller42_stack() {
// CHECK: define i32 @caller42_stack()
// CHECK: %[[a:.*]] = alloca %struct.s42, align 4
// CHECK: %[[b:.*]] = alloca %struct.s42, align 4
// CHECK: %[[c:.*]] = bitcast %struct.s42* %[[a]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: %[[d:.*]] = bitcast %struct.s42* %[[b]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: call i32 @f42_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, %struct.s42* %[[a]], %struct.s42* %[[b]])
return f42_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g42, g42_2);
}
// structs with size > 16 bytes, with alignment attribute
struct s43
{
int i;
short s;
int i2;
short s2;
int i3;
short s3;
} __attribute__((aligned(16)));
typedef struct s43 s43_with_align;
// passing aligned structs in registers
__attribute__ ((noinline))
int f43(int i, s43_with_align s1, s43_with_align s2) {
// CHECK: define i32 @f43(i32 %i, %struct.s43* %s1, %struct.s43* %s2)
// CHECK: getelementptr inbounds %struct.s43* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s43* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s43* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s43* %s2, i32 0, i32 1
return s1.i + s2.i + i + s1.s + s2.s;
}
s43_with_align g43;
s43_with_align g43_2;
int caller43() {
// CHECK: define i32 @caller43()
// CHECK: %[[a:.*]] = alloca %struct.s43, align 16
// CHECK: %[[b:.*]] = alloca %struct.s43, align 16
// CHECK: %[[c:.*]] = bitcast %struct.s43* %[[a]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: %[[d:.*]] = bitcast %struct.s43* %[[b]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: call i32 @f43(i32 3, %struct.s43* %[[a]], %struct.s43* %[[b]])
return f43(3, g43, g43_2);
}
// passing aligned structs on stack
__attribute__ ((noinline))
int f43_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s43_with_align s1, s43_with_align s2) {
// CHECK: define i32 @f43_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, %struct.s43* %s1, %struct.s43* %s2)
// CHECK: getelementptr inbounds %struct.s43* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s43* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s43* %s1, i32 0, i32 1
// CHECK: getelementptr inbounds %struct.s43* %s2, i32 0, i32 1
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + i8 + i9 + s1.s + s2.s;
}
int caller43_stack() {
// CHECK: define i32 @caller43_stack()
// CHECK: %[[a:.*]] = alloca %struct.s43, align 16
// CHECK: %[[b:.*]] = alloca %struct.s43, align 16
// CHECK: %[[c:.*]] = bitcast %struct.s43* %[[a]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: %[[d:.*]] = bitcast %struct.s43* %[[b]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
// CHECK: call i32 @f43_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, %struct.s43* %[[a]], %struct.s43* %[[b]])
return f43_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g43, g43_2);
}
// rdar://13668927
// We should not split argument s1 between registers and stack.
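// With seven fixed integer arguments, only one GPR (w7) remains free; the
// [1 x i32] padding argument in the CHECK lines below burns it, so each
// coerced [2 x i64] lands either entirely in registers or entirely on the
// stack.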
__attribute__ ((noinline))
int f40_split(int i, int i2, int i3, int i4, int i5, int i6, int i7,
s40_no_align s1, s40_no_align s2) {
// CHECK: define i32 @f40_split(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, [1 x i32], [2 x i64] %s1.coerce, [2 x i64] %s2.coerce)
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + s1.s + s2.s;
}
int caller40_split() {
// CHECK: define i32 @caller40_split()
// CHECK: call i32 @f40_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, [1 x i32] undef, [2 x i64] %{{.*}}, [2 x i64] %{{.*}})
return f40_split(1, 2, 3, 4, 5, 6, 7, g40, g40_2);
}
__attribute__ ((noinline))
int f41_split(int i, int i2, int i3, int i4, int i5, int i6, int i7,
s41_with_align s1, s41_with_align s2) {
// CHECK: define i32 @f41_split(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, [1 x i32], i128 %s1.coerce, i128 %s2.coerce)
return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + s1.s + s2.s;
}
int caller41_split() {
// CHECK: define i32 @caller41_split()
// CHECK: call i32 @f41_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, [1 x i32] undef, i128 %{{.*}}, i128 %{{.*}})
return f41_split(1, 2, 3, 4, 5, 6, 7, g41, g41_2);
}
// Handle homogeneous aggregates properly in variadic functions.
struct HFA {
float a, b, c, d;
};
float test_hfa(int n, ...) {
// CHECK-LABEL: define float @test_hfa(i32 %n, ...)
// CHECK: [[THELIST:%.*]] = alloca i8*
// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]]
// HFA is not indirect, so occupies its full 16 bytes on the stack.
// CHECK: [[NEXTLIST:%.*]] = getelementptr i8* [[CURLIST]], i32 16
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
// CHECK: bitcast i8* [[CURLIST]] to %struct.HFA*
__builtin_va_list thelist;
__builtin_va_start(thelist, n);
struct HFA h = __builtin_va_arg(thelist, struct HFA);
return h.d;
}
struct TooBigHFA {
float a, b, c, d, e;
};
float test_toobig_hfa(int n, ...) {
// CHECK-LABEL: define float @test_toobig_hfa(i32 %n, ...)
// CHECK: [[THELIST:%.*]] = alloca i8*
// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]]
// TooBigHFA is not actually an HFA, so gets passed indirectly. Only 8 bytes
// of stack consumed.
// CHECK: [[NEXTLIST:%.*]] = getelementptr i8* [[CURLIST]], i32 8
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
// CHECK: [[HFAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to i8**
// CHECK: [[HFAPTR:%.*]] = load i8** [[HFAPTRPTR]]
// CHECK: bitcast i8* [[HFAPTR]] to %struct.TooBigHFA*
__builtin_va_list thelist;
__builtin_va_start(thelist, n);
struct TooBigHFA h = __builtin_va_arg(thelist, struct TooBigHFA);
return h.d;
}
struct HVA {
int32x4_t a, b;
};
int32x4_t test_hva(int n, ...) {
// CHECK-LABEL: define <4 x i32> @test_hva(i32 %n, ...)
// CHECK: [[THELIST:%.*]] = alloca i8*
// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]]
// HVA is not indirect, so occupies its full 32 bytes on the stack, but it
// must be properly aligned.
// CHECK: [[ALIGN0:%.*]] = getelementptr i8* [[CURLIST]], i32 15
// CHECK: [[ALIGN1:%.*]] = ptrtoint i8* [[ALIGN0]] to i64
// CHECK: [[ALIGN2:%.*]] = and i64 [[ALIGN1]], -16
// CHECK: [[ALIGNED_LIST:%.*]] = inttoptr i64 [[ALIGN2]] to i8*
// CHECK: [[NEXTLIST:%.*]] = getelementptr i8* [[ALIGNED_LIST]], i32 32
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
// CHECK: bitcast i8* [[ALIGNED_LIST]] to %struct.HVA*
__builtin_va_list thelist;
__builtin_va_start(thelist, n);
struct HVA h = __builtin_va_arg(thelist, struct HVA);
return h.b;
}
struct TooBigHVA {
int32x4_t a, b, c, d, e;
};
int32x4_t test_toobig_hva(int n, ...) {
// CHECK-LABEL: define <4 x i32> @test_toobig_hva(i32 %n, ...)
// CHECK: [[THELIST:%.*]] = alloca i8*
// CHECK: [[CURLIST:%.*]] = load i8** [[THELIST]]
// TooBigHVA is not actually an HVA, so gets passed indirectly. Only 8 bytes
// of stack consumed.
// CHECK: [[NEXTLIST:%.*]] = getelementptr i8* [[CURLIST]], i32 8
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
// CHECK: [[HVAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to i8**
// CHECK: [[HVAPTR:%.*]] = load i8** [[HVAPTRPTR]]
// CHECK: bitcast i8* [[HVAPTR]] to %struct.TooBigHVA*
__builtin_va_list thelist;
__builtin_va_start(thelist, n);
struct TooBigHVA h = __builtin_va_arg(thelist, struct TooBigHVA);
return h.d;
}

View File

@ -0,0 +1,55 @@
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
// RUN: -O3 -S -emit-llvm -o - %s | FileCheck %s
int crc32b(int a, char b)
{
return __builtin_arm_crc32b(a,b);
// CHECK: [[T0:%[0-9]+]] = zext i8 %b to i32
// CHECK: call i32 @llvm.arm64.crc32b(i32 %a, i32 [[T0]])
}
int crc32cb(int a, char b)
{
return __builtin_arm_crc32cb(a,b);
// CHECK: [[T0:%[0-9]+]] = zext i8 %b to i32
// CHECK: call i32 @llvm.arm64.crc32cb(i32 %a, i32 [[T0]])
}
int crc32h(int a, short b)
{
return __builtin_arm_crc32h(a,b);
// CHECK: [[T0:%[0-9]+]] = zext i16 %b to i32
// CHECK: call i32 @llvm.arm64.crc32h(i32 %a, i32 [[T0]])
}
int crc32ch(int a, short b)
{
return __builtin_arm_crc32ch(a,b);
// CHECK: [[T0:%[0-9]+]] = zext i16 %b to i32
// CHECK: call i32 @llvm.arm64.crc32ch(i32 %a, i32 [[T0]])
}
int crc32w(int a, int b)
{
return __builtin_arm_crc32w(a,b);
// CHECK: call i32 @llvm.arm64.crc32w(i32 %a, i32 %b)
}
int crc32cw(int a, int b)
{
return __builtin_arm_crc32cw(a,b);
// CHECK: call i32 @llvm.arm64.crc32cw(i32 %a, i32 %b)
}
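// The 64-bit data variants below map onto the llvm.arm64.crc32x/crc32cx
// intrinsics, matching the AArch64 instruction names rather than the
// builtin names.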
int crc32d(int a, long b)
{
return __builtin_arm_crc32d(a,b);
// CHECK: call i32 @llvm.arm64.crc32x(i32 %a, i64 %b)
}
int crc32cd(int a, long b)
{
return __builtin_arm_crc32cd(a,b);
// CHECK: call i32 @llvm.arm64.crc32cx(i32 %a, i64 %b)
}

View File

@ -0,0 +1,63 @@
// RUN: %clang_cc1 -O3 -triple arm64-apple-ios7 -ffreestanding -emit-llvm -o - %s | FileCheck %s
#include <arm_neon.h>
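// Each scalar vdup*_lane call below should fold to a single extractelement
// of the source vector at the requested lane.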
// CHECK-LABEL: @test_vdupb_lane_s8
int8_t test_vdupb_lane_s8(int8x8_t src) {
return vdupb_lane_s8(src, 2);
// CHECK: extractelement <8 x i8> %src, i32 2
}
// CHECK-LABEL: @test_vdupb_lane_u8
uint8_t test_vdupb_lane_u8(uint8x8_t src) {
return vdupb_lane_u8(src, 2);
// CHECK: extractelement <8 x i8> %src, i32 2
}
// CHECK-LABEL: @test_vduph_lane_s16
int16_t test_vduph_lane_s16(int16x4_t src) {
return vduph_lane_s16(src, 2);
// CHECK: extractelement <4 x i16> %src, i32 2
}
// CHECK-LABEL: @test_vduph_lane_u16
uint16_t test_vduph_lane_u16(uint16x4_t src) {
return vduph_lane_u16(src, 2);
// CHECK: extractelement <4 x i16> %src, i32 2
}
// CHECK-LABEL: @test_vdups_lane_s32
int32_t test_vdups_lane_s32(int32x2_t src) {
return vdups_lane_s32(src, 0);
// CHECK: extractelement <2 x i32> %src, i32 0
}
// CHECK-LABEL: @test_vdups_lane_u32
uint32_t test_vdups_lane_u32(uint32x2_t src) {
return vdups_lane_u32(src, 0);
// CHECK: extractelement <2 x i32> %src, i32 0
}
// CHECK-LABEL: @test_vdups_lane_f32
float32_t test_vdups_lane_f32(float32x2_t src) {
return vdups_lane_f32(src, 0);
// CHECK: extractelement <2 x float> %src, i32 0
}
// CHECK-LABEL: @test_vdupd_lane_s64
int64_t test_vdupd_lane_s64(int64x1_t src) {
return vdupd_lane_s64(src, 0);
// CHECK: extractelement <1 x i64> %src, i32 0
}
// CHECK-LABEL: @test_vdupd_lane_u64
uint64_t test_vdupd_lane_u64(uint64x1_t src) {
return vdupd_lane_u64(src, 0);
// CHECK: extractelement <1 x i64> %src, i32 0
}
// CHECK-LABEL: @test_vdupd_lane_f64
float64_t test_vdupd_lane_f64(float64x1_t src) {
return vdupd_lane_f64(src, 0);
// CHECK: extractelement <1 x double> %src, i32 0
}

View File

@ -0,0 +1,535 @@
// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple arm64-apple-ios7.0 \
// RUN: -S -O1 -o - -ffreestanding %s | FileCheck %s
// We're explicitly using arm_neon.h here: some types probably don't match
// the ACLE definitions, but we want to check current codegen.
#include <arm_neon.h>
float test_vrsqrtss_f32(float a, float b) {
// CHECK: test_vrsqrtss_f32
return vrsqrtss_f32(a, b);
// CHECK: frsqrts {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
double test_vrsqrtsd_f64(double a, double b) {
// CHECK: test_vrsqrtsd_f64
return vrsqrtsd_f64(a, b);
// CHECK: frsqrts {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
int64x1_t test_vrshl_s64(int64x1_t a, int64x1_t b) {
// CHECK: test_vrshl_s64
return vrshl_s64(a, b);
// CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
uint64x1_t test_vrshl_u64(uint64x1_t a, int64x1_t b) {
// CHECK: test_vrshl_u64
return vrshl_u64(a, b);
// CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vrshld_s64
int64_t test_vrshld_s64(int64_t a, int64_t b) {
return vrshld_s64(a, b);
// CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vrshld_u64
uint64_t test_vrshld_u64(uint64_t a, uint64_t b) {
return vrshld_u64(a, b);
// CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqrshlb_s8
int8_t test_vqrshlb_s8(int8_t a, int8_t b) {
return vqrshlb_s8(a, b);
// CHECK: sqrshl.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqrshlh_s16
int16_t test_vqrshlh_s16(int16_t a, int16_t b) {
return vqrshlh_s16(a, b);
// CHECK: sqrshl.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqrshls_s32
int32_t test_vqrshls_s32(int32_t a, int32_t b) {
return vqrshls_s32(a, b);
// CHECK: sqrshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqrshld_s64
int64_t test_vqrshld_s64(int64_t a, int64_t b) {
return vqrshld_s64(a, b);
// CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqrshlb_u8
uint8_t test_vqrshlb_u8(uint8_t a, uint8_t b) {
return vqrshlb_u8(a, b);
// CHECK: uqrshl.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqrshlh_u16
uint16_t test_vqrshlh_u16(uint16_t a, uint16_t b) {
return vqrshlh_u16(a, b);
// CHECK: uqrshl.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqrshls_u32
uint32_t test_vqrshls_u32(uint32_t a, uint32_t b) {
return vqrshls_u32(a, b);
// CHECK: uqrshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqrshld_u64
uint64_t test_vqrshld_u64(uint64_t a, uint64_t b) {
return vqrshld_u64(a, b);
// CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqshlb_s8
int8_t test_vqshlb_s8(int8_t a, int8_t b) {
return vqshlb_s8(a, b);
// CHECK: sqshl.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqshlh_s16
int16_t test_vqshlh_s16(int16_t a, int16_t b) {
return vqshlh_s16(a, b);
// CHECK: sqshl.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqshls_s32
int32_t test_vqshls_s32(int32_t a, int32_t b) {
return vqshls_s32(a, b);
// CHECK: sqshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqshld_s64
int64_t test_vqshld_s64(int64_t a, int64_t b) {
return vqshld_s64(a, b);
// CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqshlb_u8
uint8_t test_vqshlb_u8(uint8_t a, uint8_t b) {
return vqshlb_u8(a, b);
// CHECK: uqshl.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqshlh_u16
uint16_t test_vqshlh_u16(uint16_t a, uint16_t b) {
return vqshlh_u16(a, b);
// CHECK: uqshl.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqshls_u32
uint32_t test_vqshls_u32(uint32_t a, uint32_t b) {
return vqshls_u32(a, b);
// CHECK: uqshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqshld_u64
uint64_t test_vqshld_u64(uint64_t a, uint64_t b) {
return vqshld_u64(a, b);
// CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vshld_u64
uint64_t test_vshld_u64(uint64_t a, uint64_t b) {
return vshld_u64(a, b);
// CHECK: ushl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vshld_s64
int64_t test_vshld_s64(int64_t a, int64_t b) {
return vshld_s64(a, b);
// CHECK: sshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqdmullh_s16
int32_t test_vqdmullh_s16(int16_t a, int16_t b) {
return vqdmullh_s16(a, b);
// CHECK: sqdmull.4s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqdmulls_s32
int64_t test_vqdmulls_s32(int32_t a, int32_t b) {
return vqdmulls_s32(a, b);
// CHECK: sqdmull {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqaddb_s8
int8_t test_vqaddb_s8(int8_t a, int8_t b) {
return vqaddb_s8(a, b);
// CHECK: sqadd.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqaddh_s16
int16_t test_vqaddh_s16(int16_t a, int16_t b) {
return vqaddh_s16(a, b);
// CHECK: sqadd.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqadds_s32
int32_t test_vqadds_s32(int32_t a, int32_t b) {
return vqadds_s32(a, b);
// CHECK: sqadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqaddd_s64
int64_t test_vqaddd_s64(int64_t a, int64_t b) {
return vqaddd_s64(a, b);
// CHECK: sqadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqaddb_u8
uint8_t test_vqaddb_u8(uint8_t a, uint8_t b) {
return vqaddb_u8(a, b);
// CHECK: uqadd.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqaddh_u16
uint16_t test_vqaddh_u16(uint16_t a, uint16_t b) {
return vqaddh_u16(a, b);
// CHECK: uqadd.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqadds_u32
uint32_t test_vqadds_u32(uint32_t a, uint32_t b) {
return vqadds_u32(a, b);
// CHECK: uqadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqaddd_u64
uint64_t test_vqaddd_u64(uint64_t a, uint64_t b) {
return vqaddd_u64(a, b);
// CHECK: uqadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqsubb_s8
int8_t test_vqsubb_s8(int8_t a, int8_t b) {
return vqsubb_s8(a, b);
// CHECK: sqsub.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqsubh_s16
int16_t test_vqsubh_s16(int16_t a, int16_t b) {
return vqsubh_s16(a, b);
// CHECK: sqsub.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqsubs_s32
int32_t test_vqsubs_s32(int32_t a, int32_t b) {
return vqsubs_s32(a, b);
// CHECK: sqsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqsubd_s64
int64_t test_vqsubd_s64(int64_t a, int64_t b) {
return vqsubd_s64(a, b);
// CHECK: sqsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqsubb_u8
uint8_t test_vqsubb_u8(uint8_t a, uint8_t b) {
return vqsubb_u8(a, b);
// CHECK: uqsub.8b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqsubh_u16
uint16_t test_vqsubh_u16(uint16_t a, uint16_t b) {
return vqsubh_u16(a, b);
// CHECK: uqsub.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqsubs_u32
uint32_t test_vqsubs_u32(uint32_t a, uint32_t b) {
return vqsubs_u32(a, b);
// CHECK: uqsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqsubd_u64
uint64_t test_vqsubd_u64(uint64_t a, uint64_t b) {
return vqsubd_u64(a, b);
// CHECK: uqsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqmovnh_s16
int8_t test_vqmovnh_s16(int16_t a) {
return vqmovnh_s16(a);
// CHECK: sqxtn.8b {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqmovnh_u16
uint8_t test_vqmovnh_u16(uint16_t a) {
return vqmovnh_u16(a);
// CHECK: uqxtn.8b {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqmovns_s32
int16_t test_vqmovns_s32(int32_t a) {
return vqmovns_s32(a);
// CHECK: sqxtn.4h {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqmovns_u32
uint16_t test_vqmovns_u32(uint32_t a) {
return vqmovns_u32(a);
// CHECK: uqxtn.4h {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqmovnd_s64
int32_t test_vqmovnd_s64(int64_t a) {
return vqmovnd_s64(a);
// CHECK: sqxtn {{s[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqmovnd_u64
uint32_t test_vqmovnd_u64(uint64_t a) {
return vqmovnd_u64(a);
// CHECK: uqxtn {{s[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqmovunh_s16
int8_t test_vqmovunh_s16(int16_t a) {
return vqmovunh_s16(a);
// CHECK: sqxtun.8b {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqmovuns_s32
int16_t test_vqmovuns_s32(int32_t a) {
return vqmovuns_s32(a);
// CHECK: sqxtun.4h {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqmovund_s64
int32_t test_vqmovund_s64(int64_t a) {
return vqmovund_s64(a);
// CHECK: sqxtun {{s[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqabsb_s8
int8_t test_vqabsb_s8(int8_t a) {
return vqabsb_s8(a);
// CHECK: sqabs.8b {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqabsh_s16
int16_t test_vqabsh_s16(int16_t a) {
return vqabsh_s16(a);
// CHECK: sqabs.4h {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqabss_s32
int32_t test_vqabss_s32(int32_t a) {
return vqabss_s32(a);
// CHECK: sqabs {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqabsd_s64
int64_t test_vqabsd_s64(int64_t a) {
return vqabsd_s64(a);
// CHECK: sqabs {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vqnegb_s8
int8_t test_vqnegb_s8(int8_t a) {
return vqnegb_s8(a);
// CHECK: sqneg.8b {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqnegh_s16
int16_t test_vqnegh_s16(int16_t a) {
return vqnegh_s16(a);
// CHECK: sqneg.4h {{v[0-9]+}}, {{v[0-9]+}}
}
// CHECK: test_vqnegs_s32
int32_t test_vqnegs_s32(int32_t a) {
return vqnegs_s32(a);
// CHECK: sqneg {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vqnegd_s64
int64_t test_vqnegd_s64(int64_t a) {
return vqnegd_s64(a);
// CHECK: sqneg {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvts_n_f32_s32
float32_t test_vcvts_n_f32_s32(int32_t a) {
return vcvts_n_f32_s32(a, 3);
// CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #3
}
// CHECK: test_vcvts_n_f32_u32
float32_t test_vcvts_n_f32_u32(uint32_t a) {
return vcvts_n_f32_u32(a, 3);
// CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #3
}
// CHECK: test_vcvtd_n_f64_s64
float64_t test_vcvtd_n_f64_s64(int64_t a) {
return vcvtd_n_f64_s64(a, 3);
// CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #3
}
// CHECK: test_vcvtd_n_f64_u64
float64_t test_vcvtd_n_f64_u64(uint64_t a) {
return vcvtd_n_f64_u64(a, 3);
// CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #3
}
// CHECK: test_vcvts_n_s32_f32
int32_t test_vcvts_n_s32_f32(float32_t a) {
return vcvts_n_s32_f32(a, 3);
// CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #3
}
// CHECK: test_vcvts_n_u32_f32
uint32_t test_vcvts_n_u32_f32(float32_t a) {
return vcvts_n_u32_f32(a, 3);
// CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #3
}
// CHECK: test_vcvtd_n_s64_f64
int64_t test_vcvtd_n_s64_f64(float64_t a) {
return vcvtd_n_s64_f64(a, 3);
// CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #3
}
// CHECK: test_vcvtd_n_u64_f64
uint64_t test_vcvtd_n_u64_f64(float64_t a) {
return vcvtd_n_u64_f64(a, 3);
// CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #3
}
// CHECK: test_vcvtas_s32_f32
int32_t test_vcvtas_s32_f32(float32_t a) {
return vcvtas_s32_f32(a);
// CHECK: fcvtas {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtas_u32_f32
uint32_t test_vcvtas_u32_f32(float32_t a) {
return vcvtas_u32_f32(a);
// CHECK: fcvtau {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtad_s64_f64
int64_t test_vcvtad_s64_f64(float64_t a) {
return vcvtad_s64_f64(a);
// CHECK: fcvtas {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtad_u64_f64
uint64_t test_vcvtad_u64_f64(float64_t a) {
return vcvtad_u64_f64(a);
// CHECK: fcvtau {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtms_s32_f32
int32_t test_vcvtms_s32_f32(float32_t a) {
return vcvtms_s32_f32(a);
// CHECK: fcvtms {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtms_u32_f32
uint32_t test_vcvtms_u32_f32(float32_t a) {
return vcvtms_u32_f32(a);
// CHECK: fcvtmu {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtmd_s64_f64
int64_t test_vcvtmd_s64_f64(float64_t a) {
return vcvtmd_s64_f64(a);
// CHECK: fcvtms {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtmd_u64_f64
uint64_t test_vcvtmd_u64_f64(float64_t a) {
return vcvtmd_u64_f64(a);
// CHECK: fcvtmu {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtns_s32_f32
int32_t test_vcvtns_s32_f32(float32_t a) {
return vcvtns_s32_f32(a);
// CHECK: fcvtns {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtns_u32_f32
uint32_t test_vcvtns_u32_f32(float32_t a) {
return vcvtns_u32_f32(a);
// CHECK: fcvtnu {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtnd_s64_f64
int64_t test_vcvtnd_s64_f64(float64_t a) {
return vcvtnd_s64_f64(a);
// CHECK: fcvtns {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtnd_u64_f64
uint64_t test_vcvtnd_u64_f64(float64_t a) {
return vcvtnd_u64_f64(a);
// CHECK: fcvtnu {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtps_s32_f32
int32_t test_vcvtps_s32_f32(float32_t a) {
return vcvtps_s32_f32(a);
// CHECK: fcvtps {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtps_u32_f32
uint32_t test_vcvtps_u32_f32(float32_t a) {
return vcvtps_u32_f32(a);
// CHECK: fcvtpu {{w[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vcvtpd_s64_f64
int64_t test_vcvtpd_s64_f64(float64_t a) {
return vcvtpd_s64_f64(a);
// CHECK: fcvtps {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtpd_u64_f64
uint64_t test_vcvtpd_u64_f64(float64_t a) {
return vcvtpd_u64_f64(a);
// CHECK: fcvtpu {{x[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vcvtxd_f32_f64
float32_t test_vcvtxd_f32_f64(float64_t a) {
return vcvtxd_f32_f64(a);
// CHECK: fcvtxn {{s[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vabds_f32
float32_t test_vabds_f32(float32_t a, float32_t b) {
return vabds_f32(a, b);
// CHECK: fabd {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vabdd_f64
float64_t test_vabdd_f64(float64_t a, float64_t b) {
return vabdd_f64(a, b);
// CHECK: fabd {{d[0-9]+}}, {{d[0-9]+}}
}
// CHECK: test_vmulxs_f32
float32_t test_vmulxs_f32(float32_t a, float32_t b) {
return vmulxs_f32(a, b);
// CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}
}
// CHECK: test_vmulxd_f64
float64_t test_vmulxd_f64(float64_t a, float64_t b) {
return vmulxd_f64(a, b);
// CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}
}

View File

@ -0,0 +1,51 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7 -ffreestanding -emit-llvm -o - %s | FileCheck %s
#include <arm_neon.h>
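// Where a generic LLVM intrinsic exists, the vrnd* forms map onto it
// (trunc, floor, ceil, round, rint); only the round-to-nearest-even
// variants (vrndn*) need the target-specific frintn intrinsic.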
int32x2_t rnd1(float32x2_t a) { return vrnd_f32(a); }
// CHECK: call <2 x float> @llvm.trunc.v2f32(<2 x float>
int32x4_t rnd3(float32x4_t a) { return vrndq_f32(a); }
// CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float>
int64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }
// CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double>
int32x2_t rnd7(float32x2_t a) { return vrndn_f32(a); }
// CHECK: call <2 x float> @llvm.arm64.neon.frintn.v2f32(<2 x float>
int32x4_t rnd8(float32x4_t a) { return vrndnq_f32(a); }
// CHECK: call <4 x float> @llvm.arm64.neon.frintn.v4f32(<4 x float>
int64x2_t rnd9(float64x2_t a) { return vrndnq_f64(a); }
// CHECK: call <2 x double> @llvm.arm64.neon.frintn.v2f64(<2 x double>
int64x2_t rnd10(float64x2_t a) { return vrndnq_f64(a); }
// CHECK: call <2 x double> @llvm.arm64.neon.frintn.v2f64(<2 x double>
int32x2_t rnd11(float32x2_t a) { return vrndm_f32(a); }
// CHECK: call <2 x float> @llvm.floor.v2f32(<2 x float>
int32x4_t rnd12(float32x4_t a) { return vrndmq_f32(a); }
// CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float>
int64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }
// CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double>
int64x2_t rnd14(float64x2_t a) { return vrndmq_f64(a); }
// CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double>
int32x2_t rnd15(float32x2_t a) { return vrndp_f32(a); }
// CHECK: call <2 x float> @llvm.ceil.v2f32(<2 x float>
int32x4_t rnd16(float32x4_t a) { return vrndpq_f32(a); }
// CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float>
int64x2_t rnd18(float64x2_t a) { return vrndpq_f64(a); }
// CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double>
int32x2_t rnd19(float32x2_t a) { return vrnda_f32(a); }
// CHECK: call <2 x float> @llvm.round.v2f32(<2 x float>
int32x4_t rnd20(float32x4_t a) { return vrndaq_f32(a); }
// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float>
int64x2_t rnd22(float64x2_t a) { return vrndaq_f64(a); }
// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double>
int32x2_t rnd23(float32x2_t a) { return vrndx_f32(a); }
// CHECK: call <2 x float> @llvm.rint.v2f32(<2 x float>
int32x4_t rnd24(float32x4_t a) { return vrndxq_f32(a); }
// CHECK: call <4 x float> @llvm.rint.v4f32(<4 x float>
int64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }
// CHECK: call <2 x double> @llvm.rint.v2f64(<2 x double>

View File

@ -0,0 +1,43 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7.0 -ffreestanding -emit-llvm -O1 -o - %s | FileCheck %s
#include <arm_neon.h>
uint32x2_t test_vrsqrte_u32(uint32x2_t in) {
// CHECK-LABEL: @test_vrsqrte_u32
// CHECK: call <2 x i32> @llvm.arm64.neon.ursqrte.v2i32(<2 x i32> %in)
return vrsqrte_u32(in);
}
float32x2_t test_vrsqrte_f32(float32x2_t in) {
// CHECK-LABEL: @test_vrsqrte_f32
// CHECK: call <2 x float> @llvm.arm64.neon.frsqrte.v2f32(<2 x float> %in)
return vrsqrte_f32(in);
}
uint32x4_t test_vrsqrteq_u32(uint32x4_t in) {
// CHECK-LABEL: @test_vrsqrteq_u32
// CHECK: call <4 x i32> @llvm.arm64.neon.ursqrte.v4i32(<4 x i32> %in)
return vrsqrteq_u32(in);
}
float32x4_t test_vrsqrteq_f32(float32x4_t in) {
// CHECK-LABEL: @test_vrsqrteq_f32
// CHECK: call <4 x float> @llvm.arm64.neon.frsqrte.v4f32(<4 x float> %in)
return vrsqrteq_f32(in);
}
float32x2_t test_vrsqrts_f32(float32x2_t est, float32x2_t val) {
// CHECK-LABEL: @test_vrsqrts_f32
// CHECK: call <2 x float> @llvm.arm64.neon.frsqrts.v2f32(<2 x float> %est, <2 x float> %val)
return vrsqrts_f32(est, val);
}
float32x4_t test_vrsqrtsq_f32(float32x4_t est, float32x4_t val) {
// CHECK-LABEL: @test_vrsqrtsq_f32
// CHECK: call <4 x float> @llvm.arm64.neon.frsqrts.v4f32(<4 x float> %est, <4 x float> %val)
return vrsqrtsq_f32(est, val);
}

View File

@ -0,0 +1,93 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7.0 -ffreestanding -Os -S -o - %s | FileCheck %s
// REQUIRES: arm64-registered-target
#include <arm_neon.h>
uint8x16_t test_aese(uint8x16_t data, uint8x16_t key) {
// CHECK-LABEL: test_aese:
// CHECK: aese.16b v0, v1
return vaeseq_u8(data, key);
}
uint8x16_t test_aesd(uint8x16_t data, uint8x16_t key) {
// CHECK-LABEL: test_aesd:
// CHECK: aesd.16b v0, v1
return vaesdq_u8(data, key);
}
uint8x16_t test_aesmc(uint8x16_t data, uint8x16_t key) {
// CHECK-LABEL: test_aesmc:
// CHECK: aesmc.16b v0, v0
return vaesmcq_u8(data);
}
uint8x16_t test_aesimc(uint8x16_t data, uint8x16_t key) {
// CHECK-LABEL: test_aesimc:
// CHECK: aesimc.16b v0, v0
return vaesimcq_u8(data);
}
uint32x4_t test_sha1c(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
// CHECK-LABEL: test_sha1c:
// CHECK: fmov [[HASH_E:s[0-9]+]], w0
// CHECK: sha1c.4s q0, [[HASH_E]], v1
return vsha1cq_u32(hash_abcd, hash_e, wk);
}
uint32x4_t test_sha1p(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
// CHECK-LABEL: test_sha1p:
// CHECK: fmov [[HASH_E:s[0-9]+]], w0
// CHECK: sha1p.4s q0, [[HASH_E]], v1
return vsha1pq_u32(hash_abcd, hash_e, wk);
}
uint32x4_t test_sha1m(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
// CHECK-LABEL: test_sha1m:
// CHECK: fmov [[HASH_E:s[0-9]+]], w0
// CHECK: sha1m.4s q0, [[HASH_E]], v1
return vsha1mq_u32(hash_abcd, hash_e, wk);
}
uint32_t test_sha1h(uint32_t hash_e) {
// CHECK-LABEL: test_sha1h:
// CHECK: fmov [[HASH_E:s[0-9]+]], w0
// CHECK: sha1h [[RES:s[0-9]+]], [[HASH_E]]
// CHECK: fmov w0, [[RES]]
return vsha1h_u32(hash_e);
}
uint32x4_t test_sha1su0(uint32x4_t wk0_3, uint32x4_t wk4_7, uint32x4_t wk8_11) {
// CHECK-LABEL: test_sha1su0:
// CHECK: sha1su0.4s v0, v1, v2
return vsha1su0q_u32(wk0_3, wk4_7, wk8_11);
}
uint32x4_t test_sha1su1(uint32x4_t wk0_3, uint32x4_t wk12_15) {
// CHECK-LABEL: test_sha1su1:
// CHECK: sha1su1.4s v0, v1
return vsha1su1q_u32(wk0_3, wk12_15);
}
uint32x4_t test_sha256h(uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) {
// CHECK-LABEL: test_sha256h:
// CHECK: sha256h.4s q0, q1, v2
return vsha256hq_u32(hash_abcd, hash_efgh, wk);
}
uint32x4_t test_sha256h2(uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) {
// CHECK-LABEL: test_sha256h2:
// CHECK: sha256h2.4s q0, q1, v2
return vsha256h2q_u32(hash_efgh, hash_abcd, wk);
}
uint32x4_t test_sha256su0(uint32x4_t w0_3, uint32x4_t w4_7) {
// CHECK-LABEL: test_sha256su0:
// CHECK: sha256su0.4s v0, v1
return vsha256su0q_u32(w0_3, w4_7);
}
uint32x4_t test_sha256su1(uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15) {
// CHECK-LABEL: test_sha256su1:
// CHECK: sha256su1.4s v0, v1, v2
return vsha256su1q_u32(w0_3, w8_11, w12_15);
}

View File

@ -0,0 +1,559 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7.0 -ffreestanding -Os -S -o - %s | FileCheck %s
// REQUIRES: arm64-registered-target
#include <arm_neon.h>
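// The *_high intrinsics consume the top halves of their 128-bit inputs, so
// each should select the second-half ('2'-suffixed) form of the instruction.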
int16x8_t test_vaddw_high_s8(int16x8_t lhs, int8x16_t rhs) {
// CHECK: saddw2.8h
return vaddw_high_s8(lhs, rhs);
}
int32x4_t test_vaddw_high_s16(int32x4_t lhs, int16x8_t rhs) {
// CHECK: saddw2.4s
return vaddw_high_s16(lhs, rhs);
}
int64x2_t test_vaddw_high_s32(int64x2_t lhs, int32x4_t rhs) {
// CHECK: saddw2.2d
return vaddw_high_s32(lhs, rhs);
}
uint16x8_t test_vaddw_high_u8(uint16x8_t lhs, uint8x16_t rhs) {
// CHECK: uaddw2.8h
return vaddw_high_u8(lhs, rhs);
}
uint32x4_t test_vaddw_high_u16(uint32x4_t lhs, uint16x8_t rhs) {
// CHECK: uaddw2.4s
return vaddw_high_u16(lhs, rhs);
}
uint64x2_t test_vaddw_high_u32(uint64x2_t lhs, uint32x4_t rhs) {
// CHECK: uaddw2.2d
return vaddw_high_u32(lhs, rhs);
}
int16x8_t test_vsubw_high_s8(int16x8_t lhs, int8x16_t rhs) {
// CHECK: ssubw2.8h
return vsubw_high_s8(lhs, rhs);
}
int32x4_t test_vsubw_high_s16(int32x4_t lhs, int16x8_t rhs) {
// CHECK: ssubw2.4s
return vsubw_high_s16(lhs, rhs);
}
int64x2_t test_vsubw_high_s32(int64x2_t lhs, int32x4_t rhs) {
// CHECK: ssubw2.2d
return vsubw_high_s32(lhs, rhs);
}
uint16x8_t test_vsubw_high_u8(uint16x8_t lhs, uint8x16_t rhs) {
// CHECK: usubw2.8h
return vsubw_high_u8(lhs, rhs);
}
uint32x4_t test_vsubw_high_u16(uint32x4_t lhs, uint16x8_t rhs) {
// CHECK: usubw2.4s
return vsubw_high_u16(lhs, rhs);
}
uint64x2_t test_vsubw_high_u32(uint64x2_t lhs, uint32x4_t rhs) {
// CHECK: usubw2.2d
return vsubw_high_u32(lhs, rhs);
}
int16x8_t test_vabdl_high_s8(int8x16_t lhs, int8x16_t rhs) {
// CHECK: sabdl2.8h
return vabdl_high_s8(lhs, rhs);
}
int32x4_t test_vabdl_high_s16(int16x8_t lhs, int16x8_t rhs) {
// CHECK: sabdl2.4s
return vabdl_high_s16(lhs, rhs);
}
int64x2_t test_vabdl_high_s32(int32x4_t lhs, int32x4_t rhs) {
// CHECK: sabdl2.2d
return vabdl_high_s32(lhs, rhs);
}
uint16x8_t test_vabdl_high_u8(uint8x16_t lhs, uint8x16_t rhs) {
// CHECK: uabdl2.8h
return vabdl_high_u8(lhs, rhs);
}
uint32x4_t test_vabdl_high_u16(uint16x8_t lhs, uint16x8_t rhs) {
// CHECK: uabdl2.4s
return vabdl_high_u16(lhs, rhs);
}
uint64x2_t test_vabdl_high_u32(uint32x4_t lhs, uint32x4_t rhs) {
// CHECK: uabdl2.2d
return vabdl_high_u32(lhs, rhs);
}
int16x8_t test_vabal_high_s8(int16x8_t accum, int8x16_t lhs, int8x16_t rhs) {
// CHECK: sabal2.8h
return vabal_high_s8(accum, lhs, rhs);
}
int32x4_t test_vabal_high_s16(int32x4_t accum, int16x8_t lhs, int16x8_t rhs) {
// CHECK: sabal2.4s
return vabal_high_s16(accum, lhs, rhs);
}
int64x2_t test_vabal_high_s32(int64x2_t accum, int32x4_t lhs, int32x4_t rhs) {
// CHECK: sabal2.2d
return vabal_high_s32(accum, lhs, rhs);
}
uint16x8_t test_vabal_high_u8(uint16x8_t accum, uint8x16_t lhs, uint8x16_t rhs) {
// CHECK: uabal2.8h
return vabal_high_u8(accum, lhs, rhs);
}
uint32x4_t test_vabal_high_u16(uint32x4_t accum, uint16x8_t lhs, uint16x8_t rhs) {
// CHECK: uabal2.4s
return vabal_high_u16(accum, lhs, rhs);
}
uint64x2_t test_vabal_high_u32(uint64x2_t accum, uint32x4_t lhs, uint32x4_t rhs) {
// CHECK: uabal2.2d
return vabal_high_u32(accum, lhs, rhs);
}
int32x4_t test_vqdmlal_high_s16(int32x4_t accum, int16x8_t lhs, int16x8_t rhs) {
// CHECK: sqdmlal2.4s
return vqdmlal_high_s16(accum, lhs, rhs);
}
int64x2_t test_vqdmlal_high_s32(int64x2_t accum, int32x4_t lhs, int32x4_t rhs) {
// CHECK: sqdmlal2.2d
return vqdmlal_high_s32(accum, lhs, rhs);
}
int32x4_t test_vqdmlsl_high_s16(int32x4_t accum, int16x8_t lhs, int16x8_t rhs) {
// CHECK: sqdmlsl2.4s
return vqdmlsl_high_s16(accum, lhs, rhs);
}
int64x2_t test_vqdmlsl_high_s32(int64x2_t accum, int32x4_t lhs, int32x4_t rhs) {
// CHECK: sqdmlsl2.2d
return vqdmlsl_high_s32(accum, lhs, rhs);
}
int32x4_t test_vqdmull_high_s16(int16x8_t lhs, int16x8_t rhs) {
// CHECK: sqdmull2.4s
return vqdmull_high_s16(lhs, rhs);
}
int64x2_t test_vqdmull_high_s32(int32x4_t lhs, int32x4_t rhs) {
// CHECK: sqdmull2.2d
return vqdmull_high_s32(lhs, rhs);
}
int16x8_t test_vshll_high_n_s8(int8x16_t in) {
// CHECK: sshll2.8h
return vshll_high_n_s8(in, 7);
}
int32x4_t test_vshll_high_n_s16(int16x8_t in) {
// CHECK: sshll2.4s
return vshll_high_n_s16(in, 15);
}
int64x2_t test_vshll_high_n_s32(int32x4_t in) {
// CHECK: sshll2.2d
return vshll_high_n_s32(in, 31);
}
uint16x8_t test_vshll_high_n_u8(uint8x16_t in) {
// CHECK: ushll2.8h
return vshll_high_n_u8(in, 7);
}
uint32x4_t test_vshll_high_n_u16(uint16x8_t in) {
// CHECK: ushll2.4s
return vshll_high_n_u16(in, 15);
}
uint64x2_t test_vshll_high_n_u32(uint32x4_t in) {
// CHECK: ushll2.2d
return vshll_high_n_u32(in, 31);
}
int16x8_t test_vshll_high_n_s8_max(int8x16_t in) {
// CHECK: shll2.8h
return vshll_high_n_s8(in, 8);
}
int32x4_t test_vshll_high_n_s16_max(int16x8_t in) {
// CHECK: shll2.4s
return vshll_high_n_s16(in, 16);
}
int64x2_t test_vshll_high_n_s32_max(int32x4_t in) {
// CHECK: shll2.2d
return vshll_high_n_s32(in, 32);
}
uint16x8_t test_vshll_high_n_u8_max(uint8x16_t in) {
// CHECK: shll2.8h
return vshll_high_n_u8(in, 8);
}
uint32x4_t test_vshll_high_n_u16_max(uint16x8_t in) {
// CHECK: shll2.4s
return vshll_high_n_u16(in, 16);
}
uint64x2_t test_vshll_high_n_u32_max(uint32x4_t in) {
// CHECK: shll2.2d
return vshll_high_n_u32(in, 32);
}
int16x8_t test_vsubl_high_s8(int8x16_t lhs, int8x16_t rhs) {
// CHECK: ssubl2.8h
return vsubl_high_s8(lhs, rhs);
}
int32x4_t test_vsubl_high_s16(int16x8_t lhs, int16x8_t rhs) {
// CHECK: ssubl2.4s
return vsubl_high_s16(lhs, rhs);
}
int64x2_t test_vsubl_high_s32(int32x4_t lhs, int32x4_t rhs) {
// CHECK: ssubl2.2d
return vsubl_high_s32(lhs, rhs);
}
uint16x8_t test_vsubl_high_u8(uint8x16_t lhs, uint8x16_t rhs) {
// CHECK: usubl2.8h
return vsubl_high_u8(lhs, rhs);
}
uint32x4_t test_vsubl_high_u16(uint16x8_t lhs, uint16x8_t rhs) {
// CHECK: usubl2.4s
return vsubl_high_u16(lhs, rhs);
}
uint64x2_t test_vsubl_high_u32(uint32x4_t lhs, uint32x4_t rhs) {
// CHECK: usubl2.2d
return vsubl_high_u32(lhs, rhs);
}
int8x16_t test_vrshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
// CHECK: rshrn2.16b
return vrshrn_high_n_s16(lowpart, input, 2);
}
int16x8_t test_vrshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
// CHECK: rshrn2.8h
return vrshrn_high_n_s32(lowpart, input, 2);
}
int32x4_t test_vrshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
// CHECK: rshrn2.4s
return vrshrn_high_n_s64(lowpart, input, 2);
}
uint8x16_t test_vrshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
// CHECK: rshrn2.16b
return vrshrn_high_n_u16(lowpart, input, 2);
}
uint16x8_t test_vrshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
// CHECK: rshrn2.8h
return vrshrn_high_n_u32(lowpart, input, 2);
}
uint32x4_t test_vrshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
// CHECK: rshrn2.4s
return vrshrn_high_n_u64(lowpart, input, 2);
}
int8x16_t test_vshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
// CHECK: shrn2.16b
return vshrn_high_n_s16(lowpart, input, 2);
}
int16x8_t test_vshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
// CHECK: shrn2.8h
return vshrn_high_n_s32(lowpart, input, 2);
}
int32x4_t test_vshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
// CHECK: shrn2.4s
return vshrn_high_n_s64(lowpart, input, 2);
}
uint8x16_t test_vshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
// CHECK: shrn2.16b
return vshrn_high_n_u16(lowpart, input, 2);
}
uint16x8_t test_vshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
// CHECK: shrn2.8h
return vshrn_high_n_u32(lowpart, input, 2);
}
uint32x4_t test_vshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
// CHECK: shrn2.4s
return vshrn_high_n_u64(lowpart, input, 2);
}
uint8x16_t test_vqshrun_high_n_s16(uint8x8_t lowpart, int16x8_t input) {
// CHECK: sqshrun2.16b
return vqshrun_high_n_s16(lowpart, input, 2);
}
uint16x8_t test_vqshrun_high_n_s32(uint16x4_t lowpart, int32x4_t input) {
// CHECK: sqshrun2.8h
return vqshrun_high_n_s32(lowpart, input, 2);
}
uint32x4_t test_vqshrun_high_n_s64(uint32x2_t lowpart, int64x2_t input) {
// CHECK: sqshrun2.4s
return vqshrun_high_n_s64(lowpart, input, 2);
}
uint8x16_t test_vqrshrun_high_n_s16(uint8x8_t lowpart, int16x8_t input) {
// CHECK: sqrshrun2.16b
return vqrshrun_high_n_s16(lowpart, input, 2);
}
uint16x8_t test_vqrshrun_high_n_s32(uint16x4_t lowpart, int32x4_t input) {
// CHECK: sqrshrun2.8h
return vqrshrun_high_n_s32(lowpart, input, 2);
}
uint32x4_t test_vqrshrun_high_n_s64(uint32x2_t lowpart, int64x2_t input) {
// CHECK: sqrshrun2.4s
return vqrshrun_high_n_s64(lowpart, input, 2);
}
int8x16_t test_vqshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
// CHECK: sqshrn2.16b
return vqshrn_high_n_s16(lowpart, input, 2);
}
int16x8_t test_vqshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
// CHECK: sqshrn2.8h
return vqshrn_high_n_s32(lowpart, input, 2);
}
int32x4_t test_vqshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
// CHECK: sqshrn2.4s
return vqshrn_high_n_s64(lowpart, input, 2);
}
uint8x16_t test_vqshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
// CHECK: uqshrn2.16b
return vqshrn_high_n_u16(lowpart, input, 2);
}
uint16x8_t test_vqshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
// CHECK: uqshrn2.8h
return vqshrn_high_n_u32(lowpart, input, 2);
}
uint32x4_t test_vqshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
// CHECK: uqshrn2.4s
return vqshrn_high_n_u64(lowpart, input, 2);
}
int8x16_t test_vqrshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
// CHECK: sqrshrn2.16b
return vqrshrn_high_n_s16(lowpart, input, 2);
}
int16x8_t test_vqrshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
// CHECK: sqrshrn2.8h
return vqrshrn_high_n_s32(lowpart, input, 2);
}
int32x4_t test_vqrshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
// CHECK: sqrshrn2.4s
return vqrshrn_high_n_s64(lowpart, input, 2);
}
uint8x16_t test_vqrshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
// CHECK: uqrshrn2.16b
return vqrshrn_high_n_u16(lowpart, input, 2);
}
uint16x8_t test_vqrshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
// CHECK: uqrshrn2.8h
return vqrshrn_high_n_u32(lowpart, input, 2);
}
uint32x4_t test_vqrshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
// CHECK: uqrshrn2.4s
return vqrshrn_high_n_u64(lowpart, input, 2);
}
int8x16_t test_vaddhn_high_s16(int8x8_t lowpart, int16x8_t lhs, int16x8_t rhs) {
// CHECK: addhn2.16b v0, v1, v2
return vaddhn_high_s16(lowpart, lhs, rhs);
}
int16x8_t test_vaddhn_high_s32(int16x4_t lowpart, int32x4_t lhs, int32x4_t rhs) {
// CHECK: addhn2.8h v0, v1, v2
return vaddhn_high_s32(lowpart, lhs, rhs);
}
int32x4_t test_vaddhn_high_s64(int32x2_t lowpart, int64x2_t lhs, int64x2_t rhs) {
// CHECK: addhn2.4s v0, v1, v2
return vaddhn_high_s64(lowpart, lhs, rhs);
}
uint8x16_t test_vaddhn_high_u16(uint8x8_t lowpart, uint16x8_t lhs, uint16x8_t rhs) {
// CHECK: addhn2.16b v0, v1, v2
  return vaddhn_high_u16(lowpart, lhs, rhs);
}
uint16x8_t test_vaddhn_high_u32(uint16x4_t lowpart, uint32x4_t lhs, uint32x4_t rhs) {
// CHECK: addhn2.8h v0, v1, v2
  return vaddhn_high_u32(lowpart, lhs, rhs);
}
uint32x4_t test_vaddhn_high_u64(uint32x2_t lowpart, uint64x2_t lhs, uint64x2_t rhs) {
// CHECK: addhn2.4s v0, v1, v2
  return vaddhn_high_u64(lowpart, lhs, rhs);
}
int8x16_t test_vraddhn_high_s16(int8x8_t lowpart, int16x8_t lhs, int16x8_t rhs) {
// CHECK: raddhn2.16b v0, v1, v2
return vraddhn_high_s16(lowpart, lhs, rhs);
}
int16x8_t test_vraddhn_high_s32(int16x4_t lowpart, int32x4_t lhs, int32x4_t rhs) {
// CHECK: raddhn2.8h v0, v1, v2
return vraddhn_high_s32(lowpart, lhs, rhs);
}
int32x4_t test_vraddhn_high_s64(int32x2_t lowpart, int64x2_t lhs, int64x2_t rhs) {
// CHECK: raddhn2.4s v0, v1, v2
return vraddhn_high_s64(lowpart, lhs, rhs);
}
uint8x16_t test_vraddhn_high_u16(uint8x8_t lowpart, uint16x8_t lhs, uint16x8_t rhs) {
// CHECK: raddhn2.16b v0, v1, v2
  return vraddhn_high_u16(lowpart, lhs, rhs);
}
uint16x8_t test_vraddhn_high_u32(uint16x4_t lowpart, uint32x4_t lhs, uint32x4_t rhs) {
// CHECK: raddhn2.8h v0, v1, v2
  return vraddhn_high_u32(lowpart, lhs, rhs);
}
uint32x4_t test_vraddhn_high_u64(uint32x2_t lowpart, uint64x2_t lhs, uint64x2_t rhs) {
// CHECK: raddhn2.4s v0, v1, v2
  return vraddhn_high_u64(lowpart, lhs, rhs);
}
int8x16_t test_vmovn_high_s16(int8x8_t lowpart, int16x8_t wide) {
// CHECK: xtn2.16b v0, v1
return vmovn_high_s16(lowpart, wide);
}
int16x8_t test_vmovn_high_s32(int16x4_t lowpart, int32x4_t wide) {
// CHECK: xtn2.8h v0, v1
return vmovn_high_s32(lowpart, wide);
}
int32x4_t test_vmovn_high_s64(int32x2_t lowpart, int64x2_t wide) {
// CHECK: xtn2.4s v0, v1
return vmovn_high_s64(lowpart, wide);
}
uint8x16_t test_vmovn_high_u16(uint8x8_t lowpart, uint16x8_t wide) {
// CHECK: xtn2.16b v0, v1
return vmovn_high_u16(lowpart, wide);
}
uint16x8_t test_vmovn_high_u32(uint16x4_t lowpart, uint32x4_t wide) {
// CHECK: xtn2.8h v0, v1
return vmovn_high_u32(lowpart, wide);
}
uint32x4_t test_vmovn_high_u64(uint32x2_t lowpart, uint64x2_t wide) {
// CHECK: xtn2.4s v0, v1
return vmovn_high_u64(lowpart, wide);
}
int8x16_t test_vqmovn_high_s16(int8x8_t lowpart, int16x8_t wide) {
// CHECK: sqxtn2.16b v0, v1
return vqmovn_high_s16(lowpart, wide);
}
int16x8_t test_vqmovn_high_s32(int16x4_t lowpart, int32x4_t wide) {
// CHECK: sqxtn2.8h v0, v1
return vqmovn_high_s32(lowpart, wide);
}
int32x4_t test_vqmovn_high_s64(int32x2_t lowpart, int64x2_t wide) {
// CHECK: sqxtn2.4s v0, v1
return vqmovn_high_s64(lowpart, wide);
}
uint8x16_t test_vqmovn_high_u16(uint8x8_t lowpart, uint16x8_t wide) {
// CHECK: uqxtn2.16b v0, v1
return vqmovn_high_u16(lowpart, wide);
}
uint16x8_t test_vqmovn_high_u32(uint16x4_t lowpart, uint32x4_t wide) {
// CHECK: uqxtn2.8h v0, v1
return vqmovn_high_u32(lowpart, wide);
}
uint32x4_t test_vqmovn_high_u64(uint32x2_t lowpart, uint64x2_t wide) {
// CHECK: uqxtn2.4s v0, v1
return vqmovn_high_u64(lowpart, wide);
}
uint8x16_t test_vqmovun_high_s16(uint8x8_t lowpart, int16x8_t wide) {
// CHECK: sqxtun2.16b v0, v1
return vqmovun_high_s16(lowpart, wide);
}
uint16x8_t test_vqmovun_high_s32(uint16x4_t lowpart, int32x4_t wide) {
// CHECK: sqxtun2.8h v0, v1
return vqmovun_high_s32(lowpart, wide);
}
uint32x4_t test_vqmovun_high_s64(uint32x2_t lowpart, int64x2_t wide) {
// CHECK: sqxtun2.4s v0, v1
return vqmovun_high_s64(lowpart, wide);
}
float32x4_t test_vcvtx_high_f32_f64(float32x2_t lowpart, float64x2_t wide) {
// CHECK: fcvtxn2 v0.4s, v1.2d
return vcvtx_high_f32_f64(lowpart, wide);
}
float64x2_t test_vcvt_f64_f32(float32x2_t x) {
// CHECK: fcvtl v0.2d, v0.2s
return vcvt_f64_f32(x);
}
float64x2_t test_vcvt_high_f64_f32(float32x4_t x) {
// CHECK: fcvtl2 v0.2d, v0.4s
return vcvt_high_f64_f32(x);
}
float32x2_t test_vcvt_f32_f64(float64x2_t v) {
// CHECK: fcvtn v0.2s, v0.2d
return vcvt_f32_f64(v);
}
float32x4_t test_vcvt_high_f32_f64(float32x2_t x, float64x2_t v) {
// CHECK: fcvtn2 v0.4s, v1.2d
return vcvt_high_f32_f64(x, v);
}
float32x2_t test_vcvtx_f32_f64(float64x2_t v) {
// CHECK: fcvtxn v0.2s, v0.2d
return vcvtx_f32_f64(v);
}

View File

@ -0,0 +1,108 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD fused multiply add intrinsics
#include <arm_neon.h>
int64x2_t test_vabsq_s64(int64x2_t a1) {
// CHECK: test_vabsq_s64
return vabsq_s64(a1);
// CHECK: llvm.arm64.neon.abs.v2i64
// CHECK-NEXT: ret
}
int64_t test_vceqd_s64(int64_t a1, int64_t a2) {
// CHECK: test_vceqd_s64
return vceqd_s64(a1, a2);
// CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp eq i64 %a1, %a2
// CHECK: sext i1 [[BIT]] to i64
}
int64_t test_vceqd_f64(float64_t a1, float64_t a2) {
// CHECK: test_vceqd_f64
return vceqd_f64(a1, a2);
// CHECK: [[BIT:%[0-9a-zA-Z.]+]] = fcmp oeq double %a1, %a2
// CHECK: sext i1 [[BIT]] to i64
}
uint64_t test_vcgtd_u64(uint64_t a1, uint64_t a2) {
// CHECK: test_vcgtd_u64
return vcgtd_u64(a1, a2);
// CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp ugt i64 %a1, %a2
// CHECK: sext i1 [[BIT]] to i64
}
uint64_t test_vcled_u64(uint64_t a1, uint64_t a2) {
// CHECK: test_vcled_u64
return vcled_u64(a1, a2);
// CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp ule i64 %a1, %a2
// CHECK: sext i1 [[BIT]] to i64
}
int64_t test_vceqzd_s64(int64_t a1) {
// CHECK: test_vceqzd_s64
return vceqzd_s64(a1);
// CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp eq i64 %a1, 0
// CHECK: sext i1 [[BIT]] to i64
}
uint64x2_t test_vceqq_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vceqq_u64
return vceqq_u64(a1, a2);
// CHECK: icmp eq <2 x i64> %a1, %a2
}
uint64x2_t test_vcgeq_s64(int64x2_t a1, int64x2_t a2) {
// CHECK: test_vcgeq_s64
return vcgeq_s64(a1, a2);
// CHECK: icmp sge <2 x i64> %a1, %a2
}
uint64x2_t test_vcgeq_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vcgeq_u64
return vcgeq_u64(a1, a2);
// CHECK: icmp uge <2 x i64> %a1, %a2
}
uint64x2_t test_vcgtq_s64(int64x2_t a1, int64x2_t a2) {
// CHECK: test_vcgtq_s64
return vcgtq_s64(a1, a2);
// CHECK: icmp sgt <2 x i64> %a1, %a2
}
uint64x2_t test_vcgtq_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vcgtq_u64
return vcgtq_u64(a1, a2);
// CHECK: icmp ugt <2 x i64> %a1, %a2
}
uint64x2_t test_vcleq_s64(int64x2_t a1, int64x2_t a2) {
// CHECK: test_vcleq_s64
return vcleq_s64(a1, a2);
// CHECK: icmp sle <2 x i64> %a1, %a2
}
uint64x2_t test_vcleq_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vcleq_u64
return vcleq_u64(a1, a2);
// CHECK: icmp ule <2 x i64> %a1, %a2
}
uint64x2_t test_vcltq_s64(int64x2_t a1, int64x2_t a2) {
// CHECK: test_vcltq_s64
return vcltq_s64(a1, a2);
// CHECK: icmp slt <2 x i64> %a1, %a2
}
uint64x2_t test_vcltq_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vcltq_u64
return vcltq_u64(a1, a2);
// CHECK: icmp ult <2 x i64> %a1, %a2
}
int64x2_t test_vqabsq_s64(int64x2_t a1) {
// CHECK: test_vqabsq_s64
return vqabsq_s64(a1);
// CHECK: llvm.arm64.neon.sqabs.v2i64(<2 x i64> %a1)
// CHECK-NEXT: ret
}
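// For reference (an illustrative scalar model, not one of the tests): the
// scalar compares above return an all-ones/all-zero mask, which is exactly
// what the "sext i1 ... to i64" IR checks encode.
static inline int64_t vceqd_s64_model(int64_t a, int64_t b) {
  return a == b ? -1LL : 0LL; // -1 is the all-ones 64-bit mask
}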

View File

@ -0,0 +1,141 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD load and stores of an N-element structure intrinsics
#include <arm_neon.h>
int64x2x2_t test_vld2q_lane_s64(const void * a1, int64x2x2_t a2) {
// CHECK: test_vld2q_lane_s64
return vld2q_lane_s64(a1, a2, 1);
// CHECK: llvm.arm64.neon.ld2lane.v2i64.p0i8
}
uint64x2x2_t test_vld2q_lane_u64(const void * a1, uint64x2x2_t a2) {
// CHECK: test_vld2q_lane_u64
return vld2q_lane_u64(a1, a2, 1);
// CHECK: llvm.arm64.neon.ld2lane.v2i64.p0i8
}
int64x1x2_t test_vld2_lane_s64(const void * a1, int64x1x2_t a2) {
// CHECK: test_vld2_lane_s64
return vld2_lane_s64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld2lane.v1i64.p0i8
}
uint64x1x2_t test_vld2_lane_u64(const void * a1, uint64x1x2_t a2) {
// CHECK: test_vld2_lane_u64
return vld2_lane_u64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld2lane.v1i64.p0i8
}
poly8x16x2_t test_vld2q_lane_p8(const void * a1, poly8x16x2_t a2) {
// CHECK: test_vld2q_lane_p8
return vld2q_lane_p8(a1, a2, 0);
// CHECK: extractvalue {{.*}} 0{{ *$}}
// CHECK: extractvalue {{.*}} 1{{ *$}}
}
uint8x16x2_t test_vld2q_lane_u8(const void * a1, uint8x16x2_t a2) {
// CHECK: test_vld2q_lane_u8
return vld2q_lane_u8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld2lane.v16i8.p0i8
}
int64x2x3_t test_vld3q_lane_s64(const void * a1, int64x2x3_t a2) {
// CHECK: test_vld3q_lane_s64
return vld3q_lane_s64(a1, a2, 1);
// CHECK: llvm.arm64.neon.ld3lane.v2i64.p0i8
}
uint64x2x3_t test_vld3q_lane_u64(const void * a1, uint64x2x3_t a2) {
// CHECK: test_vld3q_lane_u64
return vld3q_lane_u64(a1, a2, 1);
// CHECK: llvm.arm64.neon.ld3lane.v2i64.p0i8
}
int64x1x3_t test_vld3_lane_s64(const void * a1, int64x1x3_t a2) {
// CHECK: test_vld3_lane_s64
return vld3_lane_s64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld3lane.v1i64.p0i8
}
uint64x1x3_t test_vld3_lane_u64(const void * a1, uint64x1x3_t a2) {
// CHECK: test_vld3_lane_u64
return vld3_lane_u64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld3lane.v1i64.p0i8
}
int8x8x3_t test_vld3_lane_s8(const void * a1, int8x8x3_t a2) {
// CHECK: test_vld3_lane_s8
return vld3_lane_s8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld3lane.v8i8.p0i8
}
poly8x16x3_t test_vld3q_lane_p8(const void * a1, poly8x16x3_t a2) {
// CHECK: test_vld3q_lane_p8
return vld3q_lane_p8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld3lane.v16i8.p0i8
}
uint8x16x3_t test_vld3q_lane_u8(const void * a1, uint8x16x3_t a2) {
// CHECK: test_vld3q_lane_u8
return vld3q_lane_u8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld3lane.v16i8.p0i8
}
int64x2x4_t test_vld4q_lane_s64(const void * a1, int64x2x4_t a2) {
// CHECK: test_vld4q_lane_s64
return vld4q_lane_s64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v2i64.p0i8
}
uint64x2x4_t test_vld4q_lane_u64(const void * a1, uint64x2x4_t a2) {
// CHECK: test_vld4q_lane_u64
return vld4q_lane_u64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v2i64.p0i8
}
int64x1x4_t test_vld4_lane_s64(const void * a1, int64x1x4_t a2) {
// CHECK: test_vld4_lane_s64
return vld4_lane_s64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v1i64.p0i8
}
uint64x1x4_t test_vld4_lane_u64(const void * a1, uint64x1x4_t a2) {
// CHECK: test_vld4_lane_u64
return vld4_lane_u64(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v1i64.p0i8
}
int8x8x4_t test_vld4_lane_s8(const void * a1, int8x8x4_t a2) {
// CHECK: test_vld4_lane_s8
return vld4_lane_s8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v8i8.p0i8
}
uint8x8x4_t test_vld4_lane_u8(const void * a1, uint8x8x4_t a2) {
// CHECK: test_vld4_lane_u8
return vld4_lane_u8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v8i8.p0i8
}
poly8x16x4_t test_vld4q_lane_p8(const void * a1, poly8x16x4_t a2) {
// CHECK: test_vld4q_lane_p8
return vld4q_lane_p8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v16i8.p0i8
}
int8x16x4_t test_vld4q_lane_s8(const void * a1, int8x16x4_t a2) {
// CHECK: test_vld4q_lane_s8
return vld4q_lane_s8(a1, a2, 0);
// CHECK: extractvalue {{.*}} 0{{ *$}}
// CHECK: extractvalue {{.*}} 1{{ *$}}
// CHECK: extractvalue {{.*}} 2{{ *$}}
// CHECK: extractvalue {{.*}} 3{{ *$}}
}
uint8x16x4_t test_vld4q_lane_u8(const void * a1, uint8x16x4_t a2) {
// CHECK: test_vld4q_lane_u8
return vld4q_lane_u8(a1, a2, 0);
// CHECK: llvm.arm64.neon.ld4lane.v16i8.p0i8
}
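// Typical use of the lane loads above (an assumed usage sketch): refresh a
// single interleaved element pair from memory while keeping every other
// lane of an existing deinterleaved register set.
static inline int64x2x2_t reload_lane1_sketch(const int64_t *p,
                                              int64x2x2_t v) {
  return vld2q_lane_s64(p, v, 1); // overwrite lane 1 of both vectors
}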

View File

@ -0,0 +1,207 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - %s | FileCheck -check-prefix=CHECK-CODEGEN %s
// REQUIRES: arm64-registered-target
// Test ARM64 SIMD max/min intrinsics
#include <arm_neon.h>
// Test a representative sample of 8 and 16 bit, signed and unsigned, 64 and 128 bit reductions
int8_t test_vmaxv_s8(int8x8_t a1) {
// CHECK: test_vmaxv_s8
return vmaxv_s8(a1);
// CHECK: @llvm.arm64.neon.smaxv.i32.v8i8
}
uint16_t test_vminvq_u16(uint16x8_t a1) {
// CHECK: test_vminvq_u16
return vminvq_u16(a1);
// CHECK: llvm.arm64.neon.uminv.i16.v8i16
}
// Test a representative sample of 8 and 16 bit, signed and unsigned, 64 and 128 bit pairwise ops
uint8x8_t test_vmin_u8(uint8x8_t a1, uint8x8_t a2) {
// CHECK: test_vmin_u8
return vmin_u8(a1, a2);
// CHECK: llvm.arm64.neon.umin.v8i8
}
uint8x16_t test_vminq_u8(uint8x16_t a1, uint8x16_t a2) {
// CHECK: test_vminq_u8
return vminq_u8(a1, a2);
// CHECK: llvm.arm64.neon.umin.v16i8
}
int16x8_t test_vmaxq_s16(int16x8_t a1, int16x8_t a2) {
// CHECK: test_vmaxq_s16
return vmaxq_s16(a1, a2);
// CHECK: llvm.arm64.neon.smax.v8i16
}
// Test the more complicated cases of [suf]32 and f64
float64x2_t test_vmaxq_f64(float64x2_t a1, float64x2_t a2) {
// CHECK: test_vmaxq_f64
return vmaxq_f64(a1, a2);
// CHECK: llvm.arm64.neon.fmax.v2f64
}
float32x4_t test_vmaxq_f32(float32x4_t a1, float32x4_t a2) {
// CHECK: test_vmaxq_f32
return vmaxq_f32(a1, a2);
// CHECK: llvm.arm64.neon.fmax.v4f32
}
float64x2_t test_vminq_f64(float64x2_t a1, float64x2_t a2) {
// CHECK: test_vminq_f64
return vminq_f64(a1, a2);
// CHECK: llvm.arm64.neon.fmin.v2f64
}
float32x2_t test_vmax_f32(float32x2_t a1, float32x2_t a2) {
// CHECK: test_vmax_f32
return vmax_f32(a1, a2);
// CHECK: llvm.arm64.neon.fmax.v2f32
}
int32x2_t test_vmax_s32(int32x2_t a1, int32x2_t a2) {
// CHECK: test_vmax_s32
return vmax_s32(a1, a2);
// CHECK: llvm.arm64.neon.smax.v2i32
}
uint32x2_t test_vmin_u32(uint32x2_t a1, uint32x2_t a2) {
// CHECK: test_vmin_u32
return vmin_u32(a1, a2);
// CHECK: llvm.arm64.neon.umin.v2i32
}
float32_t test_vmaxnmv_f32(float32x2_t a1) {
// CHECK: test_vmaxnmv_f32
return vmaxnmv_f32(a1);
// CHECK: llvm.arm64.neon.fmaxnmv.f32.v2f32
// CHECK-NEXT: ret
}
// this doesn't translate into a valid instruction, regardless of what the
// ARM doc says.
#if 0
float64_t test_vmaxnmvq_f64(float64x2_t a1) {
// CHECK@ test_vmaxnmvq_f64
return vmaxnmvq_f64(a1);
// CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
// CHECK-NEXT@ ret
}
#endif
float32_t test_vmaxnmvq_f32(float32x4_t a1) {
// CHECK: test_vmaxnmvq_f32
return vmaxnmvq_f32(a1);
// CHECK: llvm.arm64.neon.fmaxnmv.f32.v4f32
// CHECK-NEXT: ret
}
float32_t test_vmaxv_f32(float32x2_t a1) {
// CHECK: test_vmaxv_f32
return vmaxv_f32(a1);
// CHECK: llvm.arm64.neon.fmaxv.f32.v2f32
// FIXME check that the 2nd and 3rd arguments are the same V register below
// CHECK-CODEGEN: fmaxp.2s
// CHECK-NEXT: ret
}
int32_t test_vmaxv_s32(int32x2_t a1) {
// CHECK: test_vmaxv_s32
return vmaxv_s32(a1);
// CHECK: llvm.arm64.neon.smaxv.i32.v2i32
// FIXME check that the 2nd and 3rd arguments are the same V register below
// CHECK-CODEGEN: smaxp.2s
// CHECK-NEXT: ret
}
uint32_t test_vmaxv_u32(uint32x2_t a1) {
// CHECK: test_vmaxv_u32
return vmaxv_u32(a1);
// CHECK: llvm.arm64.neon.umaxv.i32.v2i32
// FIXME check that the 2nd and 3rd arguments are the same V register below
// CHECK-CODEGEN: umaxp.2s
// CHECK-NEXT: ret
}
// FIXME punt on this for now; don't forget to fix CHECKs
#if 0
float64_t test_vmaxvq_f64(float64x2_t a1) {
// CHECK@ test_vmaxvq_f64
return vmaxvq_f64(a1);
// CHECK@ llvm.arm64.neon.fmaxv.i64.v2f64
// CHECK-NEXT@ ret
}
#endif
float32_t test_vmaxvq_f32(float32x4_t a1) {
// CHECK: test_vmaxvq_f32
return vmaxvq_f32(a1);
// CHECK: llvm.arm64.neon.fmaxv.f32.v4f32
// CHECK-NEXT: ret
}
float32_t test_vminnmv_f32(float32x2_t a1) {
// CHECK: test_vminnmv_f32
return vminnmv_f32(a1);
// CHECK: llvm.arm64.neon.fminnmv.f32.v2f32
// CHECK-NEXT: ret
}
float32_t test_vminvq_f32(float32x4_t a1) {
// CHECK: test_vminvq_f32
return vminvq_f32(a1);
// CHECK: llvm.arm64.neon.fminv.f32.v4f32
// CHECK-NEXT: ret
}
// this doesn't translate into a valid instruction, regardless of what the ARM
// doc says.
#if 0
float64_t test_vminnmvq_f64(float64x2_t a1) {
// CHECK@ test_vminnmvq_f64
return vminnmvq_f64(a1);
// CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
// CHECK-NEXT@ ret
}
#endif
float32_t test_vminnmvq_f32(float32x4_t a1) {
// CHECK: test_vminnmvq_f32
return vminnmvq_f32(a1);
// CHECK: llvm.arm64.neon.fminnmv.f32.v4f32
// CHECK-NEXT: ret
}
float32_t test_vminv_f32(float32x2_t a1) {
// CHECK: test_vminv_f32
return vminv_f32(a1);
// CHECK: llvm.arm64.neon.fminv.f32.v2f32
// CHECK-NEXT: ret
}
int32_t test_vminv_s32(int32x2_t a1) {
// CHECK: test_vminv_s32
return vminv_s32(a1);
// CHECK: llvm.arm64.neon.sminv.i32.v2i32
// CHECK-CODEGEN: sminp.2s
// CHECK-NEXT: ret
}
uint32_t test_vminv_u32(uint32x2_t a1) {
// CHECK: test_vminv_u32
return vminv_u32(a1);
// CHECK: llvm.arm64.neon.uminv.i32.v2i32
}
// FIXME punt on this for now; don't forget to fix CHECKs
#if 0
float64_t test_vminvq_f64(float64x2_t a1) {
// CHECK@ test_vminvq_f64
return vminvq_f64(a1);
// CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
// CHECK-NEXT@ ret
}
#endif

View File

@ -0,0 +1,102 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD add intrinsics
#include <arm_neon.h>
int64_t test_vaddlv_s32(int32x2_t a1) {
// CHECK: test_vaddlv_s32
return vaddlv_s32(a1);
// CHECK: llvm.arm64.neon.saddlv.i64.v2i32
// CHECK-NEXT: ret
}
uint64_t test_vaddlv_u32(uint32x2_t a1) {
// CHECK: test_vaddlv_u32
return vaddlv_u32(a1);
// CHECK: llvm.arm64.neon.uaddlv.i64.v2i32
// CHECK-NEXT: ret
}
int8_t test_vaddv_s8(int8x8_t a1) {
// CHECK: test_vaddv_s8
return vaddv_s8(a1);
// CHECK: llvm.arm64.neon.saddv.i32.v8i8
// don't check for return here (there's a trunc?)
}
int16_t test_vaddv_s16(int16x4_t a1) {
// CHECK: test_vaddv_s16
return vaddv_s16(a1);
// CHECK: llvm.arm64.neon.saddv.i32.v4i16
// don't check for return here (there's a trunc?)
}
int32_t test_vaddv_s32(int32x2_t a1) {
// CHECK: test_vaddv_s32
return vaddv_s32(a1);
// CHECK: llvm.arm64.neon.saddv.i32.v2i32
// CHECK-NEXT: ret
}
uint8_t test_vaddv_u8(uint8x8_t a1) {
// CHECK: test_vaddv_u8
return vaddv_u8(a1);
// CHECK: llvm.arm64.neon.uaddv.i32.v8i8
// don't check for return here (there's a trunc?)
}
uint16_t test_vaddv_u16(uint16x4_t a1) {
// CHECK: test_vaddv_u16
return vaddv_u16(a1);
// CHECK: llvm.arm64.neon.uaddv.i32.v4i16
// don't check for return here (there's a trunc?)
}
uint32_t test_vaddv_u32(uint32x2_t a1) {
// CHECK: test_vaddv_u32
return vaddv_u32(a1);
// CHECK: llvm.arm64.neon.uaddv.i32.v2i32
// CHECK-NEXT: ret
}
int8_t test_vaddvq_s8(int8x16_t a1) {
// CHECK: test_vaddvq_s8
return vaddvq_s8(a1);
// CHECK: llvm.arm64.neon.saddv.i32.v16i8
// don't check for return here (there's a trunc?)
}
int16_t test_vaddvq_s16(int16x8_t a1) {
// CHECK: test_vaddvq_s16
return vaddvq_s16(a1);
// CHECK: llvm.arm64.neon.saddv.i32.v8i16
// don't check for return here (there's a trunc?)
}
int32_t test_vaddvq_s32(int32x4_t a1) {
// CHECK: test_vaddvq_s32
return vaddvq_s32(a1);
// CHECK: llvm.arm64.neon.saddv.i32.v4i32
// CHECK-NEXT: ret
}
uint8_t test_vaddvq_u8(uint8x16_t a1) {
// CHECK: test_vaddvq_u8
return vaddvq_u8(a1);
// CHECK: llvm.arm64.neon.uaddv.i32.v16i8
// don't check for return here (there's a trunc?)
}
uint16_t test_vaddvq_u16(uint16x8_t a1) {
// CHECK: test_vaddvq_u16
return vaddvq_u16(a1);
// CHECK: llvm.arm64.neon.uaddv.i32.v8i16
// don't check for return here (there's a trunc?)
}
uint32_t test_vaddvq_u32(uint32x4_t a1) {
// CHECK: test_vaddvq_u32
return vaddvq_u32(a1);
// CHECK: llvm.arm64.neon.uaddv.i32.v4i32
// CHECK-NEXT: ret
}
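// A scalar model of the widening reduction above (an illustrative
// assumption): vaddlv widens each lane before summing, so the v2i32 form
// returns an i64 with no risk of overflow.
static inline int64_t vaddlv_s32_model(int32x2_t v) {
  return (int64_t)vget_lane_s32(v, 0) + (int64_t)vget_lane_s32(v, 1);
}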

View File

@ -0,0 +1,59 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 vector compare absolute intrinsics
#include <arm_neon.h>
uint32x2_t test_vcale_f32(float32x2_t a1, float32x2_t a2) {
// CHECK: test_vcale_f32
return vcale_f32(a1, a2);
// CHECK: llvm.arm64.neon.facge.v2i32.v2f32
// no check for ret here, as there is a bitcast
}
uint32x4_t test_vcaleq_f32(float32x4_t a1, float32x4_t a2) {
// CHECK: test_vcaleq_f32
return vcaleq_f32(a1, a2);
// CHECK: llvm.arm64.neon.facge.v4i32.v4f32{{.*a2,.*a1}}
// no check for ret here, as there is a bitcast
}
uint32x2_t test_vcalt_f32(float32x2_t a1, float32x2_t a2) {
// CHECK: test_vcalt_f32
return vcalt_f32(a1, a2);
// CHECK: llvm.arm64.neon.facgt.v2i32.v2f32{{.*a2,.*a1}}
// no check for ret here, as there is a bitcast
}
uint32x4_t test_vcaltq_f32(float32x4_t a1, float32x4_t a2) {
// CHECK: test_vcaltq_f32
return vcaltq_f32(a1, a2);
// CHECK: llvm.arm64.neon.facgt.v4i32.v4f32{{.*a2,.*a1}}
}
uint64x2_t test_vcagtq_f64(float64x2_t a1, float64x2_t a2) {
// CHECK: test_vcagtq_f64
return vcagtq_f64(a1, a2);
// CHECK: llvm.arm64.neon.facgt.v2i64.v2f64{{.*a1,.*a2}}
// no check for ret here, as there is a bitcast
}
uint64x2_t test_vcaltq_f64(float64x2_t a1, float64x2_t a2) {
// CHECK: test_vcaltq_f64
return vcaltq_f64(a1, a2);
// CHECK: llvm.arm64.neon.facgt.v2i64.v2f64{{.*a2,.*a1}}
// no check for ret here, as there is a bitcast
}
uint64x2_t test_vcageq_f64(float64x2_t a1, float64x2_t a2) {
// CHECK: test_vcageq_f64
return vcageq_f64(a1, a2);
// CHECK: llvm.arm64.neon.facge.v2i64.v2f64{{.*a1,.*a2}}
// no check for ret here, as there is a bitcast
}
uint64x2_t test_vcaleq_f64(float64x2_t a1, float64x2_t a2) {
// CHECK: test_vcaleq_f64
return vcaleq_f64(a1, a2);
// CHECK: llvm.arm64.neon.facge.v2i64.v2f64{{.*a2,.*a1}}
// no check for ret here, as there is a bitcast
}
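// The {{.*a2,.*a1}} patterns above check that the "le"/"lt" forms are
// implemented by swapping operands into facge/facgt. Spelled out as a
// sketch (assuming the usual ACLE equivalence):
static inline uint32x2_t vcale_sketch(float32x2_t a, float32x2_t b) {
  return vcage_f32(b, a); // |a| <= |b| holds iff |b| >= |a|
}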

View File

@ -0,0 +1,69 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD copy vector element to vector element: vcopyq_lane*
#include <arm_neon.h>
int8x16_t test_vcopyq_laneq_s8(int8x16_t a1, int8x16_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_s8
return vcopyq_laneq_s8(a1, (int64_t) 3, a2, (int64_t) 13);
// CHECK: shufflevector <16 x i8> %a1, <16 x i8> %a2, <16 x i32> <i32 0, i32 1, i32 2, i32 29, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
}
uint8x16_t test_vcopyq_laneq_u8(uint8x16_t a1, uint8x16_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_u8
return vcopyq_laneq_u8(a1, (int64_t) 3, a2, (int64_t) 13);
// CHECK: shufflevector <16 x i8> %a1, <16 x i8> %a2, <16 x i32> <i32 0, i32 1, i32 2, i32 29, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
}
int16x8_t test_vcopyq_laneq_s16(int16x8_t a1, int16x8_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_s16
return vcopyq_laneq_s16(a1, (int64_t) 3, a2, (int64_t) 7);
// CHECK: shufflevector <8 x i16> %a1, <8 x i16> %a2, <8 x i32> <i32 0, i32 1, i32 2, i32 15, i32 4, i32 5, i32 6, i32 7>
}
uint16x8_t test_vcopyq_laneq_u16(uint16x8_t a1, uint16x8_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_u16
return vcopyq_laneq_u16(a1, (int64_t) 3, a2, (int64_t) 7);
// CHECK: shufflevector <8 x i16> %a1, <8 x i16> %a2, <8 x i32> <i32 0, i32 1, i32 2, i32 15, i32 4, i32 5, i32 6, i32 7>
}
int32x4_t test_vcopyq_laneq_s32(int32x4_t a1, int32x4_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_s32
return vcopyq_laneq_s32(a1, (int64_t) 3, a2, (int64_t) 3);
// CHECK: shufflevector <4 x i32> %a1, <4 x i32> %a2, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
}
uint32x4_t test_vcopyq_laneq_u32(uint32x4_t a1, uint32x4_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_u32
return vcopyq_laneq_u32(a1, (int64_t) 3, a2, (int64_t) 3);
// CHECK: shufflevector <4 x i32> %a1, <4 x i32> %a2, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
}
int64x2_t test_vcopyq_laneq_s64(int64x2_t a1, int64x2_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_s64
return vcopyq_laneq_s64(a1, (int64_t) 0, a2, (int64_t) 1);
// CHECK: shufflevector <2 x i64> %a1, <2 x i64> %a2, <2 x i32> <i32 3, i32 1>
}
uint64x2_t test_vcopyq_laneq_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_u64
return vcopyq_laneq_u64(a1, (int64_t) 0, a2, (int64_t) 1);
// CHECK: shufflevector <2 x i64> %a1, <2 x i64> %a2, <2 x i32> <i32 3, i32 1>
}
float32x4_t test_vcopyq_laneq_f32(float32x4_t a1, float32x4_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_f32
return vcopyq_laneq_f32(a1, 0, a2, 3);
// CHECK: shufflevector <4 x float> %a1, <4 x float> %a2, <4 x i32> <i32 7, i32 1, i32 2, i32 3>
}
float64x2_t test_vcopyq_laneq_f64(float64x2_t a1, float64x2_t a2) {
// CHECK-LABEL: test_vcopyq_laneq_f64
return vcopyq_laneq_f64(a1, 0, a2, 1);
// CHECK: shufflevector <2 x double> %a1, <2 x double> %a2, <2 x i32> <i32 3, i32 1>
}

View File

@ -0,0 +1,23 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD vcreate intrinsics
#include <arm_neon.h>
float32x2_t test_vcreate_f32(uint64_t a1) {
// CHECK: test_vcreate_f32
return vcreate_f32(a1);
// CHECK: bitcast {{.*}} to <2 x float>
// CHECK-NEXT: ret
}
// FIXME enable when scalar_to_vector in backend is fixed. Also, change
// CHECK@ to CHECK<colon> and CHECK-NEXT@ to CHECK-NEXT<colon>
/*
float64x1_t test_vcreate_f64(uint64_t a1) {
// CHECK@ test_vcreate_f64
return vcreate_f64(a1);
// CHECK@ llvm.arm64.neon.saddlv.i64.v2i32
// CHECK-NEXT@ ret
}
*/

View File

@ -0,0 +1,48 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
#include <arm_neon.h>
float64x2_t test_vcvt_f64_f32(float32x2_t x) {
// CHECK-LABEL: test_vcvt_f64_f32
return vcvt_f64_f32(x);
// CHECK: fpext <2 x float> {{%.*}} to <2 x double>
// CHECK-NEXT: ret
}
float64x2_t test_vcvt_high_f64_f32(float32x4_t x) {
// CHECK-LABEL: test_vcvt_high_f64_f32
return vcvt_high_f64_f32(x);
// CHECK: [[HIGH:%.*]] = shufflevector <4 x float> {{%.*}}, <4 x float> undef, <2 x i32> <i32 2, i32 3>
// CHECK-NEXT: fpext <2 x float> [[HIGH]] to <2 x double>
// CHECK-NEXT: ret
}
float32x2_t test_vcvt_f32_f64(float64x2_t v) {
// CHECK: test_vcvt_f32_f64
return vcvt_f32_f64(v);
// CHECK: fptrunc <2 x double> {{%.*}} to <2 x float>
// CHECK-NEXT: ret
}
float32x4_t test_vcvt_high_f32_f64(float32x2_t x, float64x2_t v) {
// CHECK: test_vcvt_high_f32_f64
return vcvt_high_f32_f64(x, v);
// CHECK: [[TRUNC:%.*]] = fptrunc <2 x double> {{.*}} to <2 x float>
// CHECK-NEXT: shufflevector <2 x float> {{.*}}, <2 x float> [[TRUNC]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK-NEXT: ret
}
float32x2_t test_vcvtx_f32_f64(float64x2_t v) {
// CHECK: test_vcvtx_f32_f64
return vcvtx_f32_f64(v);
// CHECK: llvm.arm64.neon.fcvtxn.v2f32.v2f64
// CHECK-NEXT: ret
}
float32x4_t test_vcvtx_high_f32_f64(float32x2_t x, float64x2_t v) {
// CHECK: test_vcvtx_high_f32_f64
return vcvtx_high_f32_f64(x, v);
// CHECK: llvm.arm64.neon.fcvtxn.v2f32.v2f64
// CHECK: shufflevector
// CHECK-NEXT: ret
}

View File

@ -0,0 +1,42 @@
// RUN: %clang -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD duplicate lane and n intrinsics
#include <arm_neon.h>
void test_vdup_lane_s64(int64x1_t a1) {
// CHECK-LABEL: test_vdup_lane_s64
vdup_lane_s64(a1, 0);
// CHECK: shufflevector
}
void test_vdup_lane_u64(uint64x1_t a1) {
// CHECK-LABEL: test_vdup_lane_u64
vdup_lane_u64(a1, 0);
// CHECK: shufflevector
}
// uncomment the following code once scalar_to_vector in the backend
// works (for 64 bit?). Change the "CHECK@" to "CHECK<colon>"
/*
float64x1_t test_vdup_n_f64(float64_t a1) {
// CHECK-LABEL@ test_vdup_n_f64
return vdup_n_f64(a1);
// match that an element is inserted into part 0
// CHECK@ insertelement {{.*, i32 0 *$}}
}
*/
float16x8_t test_vdupq_n_f16(float16_t *a1) {
// CHECK-LABEL: test_vdupq_n_f16
return vdupq_n_f16(*a1);
// match that an element is inserted into lanes 0-7. The backend should
// turn that into a single dup instruction
// CHECK: insertelement {{.*, i32 0 *$}}
// CHECK: insertelement {{.*, i32 1 *$}}
// CHECK: insertelement {{.*, i32 2 *$}}
// CHECK: insertelement {{.*, i32 3 *$}}
// CHECK: insertelement {{.*, i32 4 *$}}
// CHECK: insertelement {{.*, i32 5 *$}}
// CHECK: insertelement {{.*, i32 6 *$}}
// CHECK: insertelement {{.*, i32 7 *$}}
}

View File

@ -0,0 +1,88 @@
// RUN: %clang -O3 -target arm64-apple-ios7 -ffreestanding -S -o - %s | FileCheck %s
// RUN: %clang -O3 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | \
// RUN: FileCheck -check-prefix=CHECK-IR %s
// REQUIRES: arm64-registered-target
/// Test vdupq_n_f64 and vmovq_n_f64 ARM64 intrinsics
// <rdar://problem/11778405> ARM64: vdupq_n_f64 and vdupq_lane_f64 intrinsics
// missing
#include <arm_neon.h>
// vdupq_n_f64 -> dup.2d v0, v0[0]
//
float64x2_t test_vdupq_n_f64(float64_t w)
{
return vdupq_n_f64(w);
// CHECK-LABEL: test_vdupq_n_f64:
// CHECK: dup.2d v0, v0[0]
// CHECK-NEXT: ret
}
// might as well test this while we're here
// vdupq_n_f32 -> dup.4s v0, v0[0]
float32x4_t test_vdupq_n_f32(float32_t w)
{
return vdupq_n_f32(w);
// CHECK-LABEL: test_vdupq_n_f32:
// CHECK: dup.4s v0, v0[0]
// CHECK-NEXT: ret
}
// vdupq_lane_f64 -> dup.2d v0, v0[0]
// this was in <rdar://problem/11778405>, but had already been implemented,
// test anyway
float64x2_t test_vdupq_lane_f64(float64x1_t V)
{
return vdupq_lane_f64(V, 0);
// CHECK-LABEL: test_vdupq_lane_f64:
// CHECK: dup.2d v0, v0[0]
// CHECK-NEXT: ret
}
// vmovq_n_f64 -> dup Vd.2d,X0
// this wasn't in <rdar://problem/11778405>, but it was between the vdups
float64x2_t test_vmovq_n_f64(float64_t w)
{
return vmovq_n_f64(w);
// CHECK-LABEL: test_vmovq_n_f64:
// CHECK: dup.2d v0, v0[0]
// CHECK-NEXT: ret
}
float16x4_t test_vmov_n_f16(float16_t *a1)
{
// CHECK-IR-LABEL: test_vmov_n_f16
return vmov_n_f16(*a1);
// CHECK-IR: insertelement {{.*}} i32 0{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 1{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 2{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 3{{ *$}}
}
// Disabled until the scalar problem in the backend is fixed. Change CHECK-IR@ to
// CHECK-IR<colon>
/*
float64x1_t test_vmov_n_f64(float64_t a1)
{
// CHECK-IR@ test_vmov_n_f64
return vmov_n_f64(a1);
// CHECK-IR@ insertelement {{.*}} i32 0{{ *$}}
}
*/
float16x8_t test_vmovq_n_f16(float16_t *a1)
{
// CHECK-IR-LABEL: test_vmovq_n_f16
return vmovq_n_f16(*a1);
// CHECK-IR: insertelement {{.*}} i32 0{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 1{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 2{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 3{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 4{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 5{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 6{{ *$}}
// CHECK-IR: insertelement {{.*}} i32 7{{ *$}}
}

View File

@ -0,0 +1,111 @@
// RUN: %clang -O3 -target arm64-apple-ios7 -S -ffreestanding %s -o - | FileCheck %s
// REQUIRES: arm64-registered-target
// test code generation for <rdar://problem/11487757>
#include <arm_neon.h>
unsigned bar();
// Branch if any lane of V0 is zero; 64 bit => !min
unsigned anyZero64(uint16x4_t a) {
// CHECK: anyZero64:
// CHECK: uminv.8b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: b {{_bar|bar}}
if (!vminv_u8(a))
return bar();
return 0;
}
// Branch if any lane of V0 is zero; 128 bit => !min
unsigned anyZero128(uint16x8_t a) {
// CHECK: anyZero128:
// CHECK: uminv.16b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: b {{_bar|bar}}
if (!vminvq_u8(a))
return bar();
return 0;
}
// Branch if any lane of V0 is non-zero; 64 bit => max
unsigned anyNonZero64(uint16x4_t a) {
// CHECK: anyNonZero64:
// CHECK: umaxv.8b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: movz w0, #0
if (vmaxv_u8(a))
return bar();
return 0;
}
// Branch if any lane of V0 is non-zero; 128 bit => max
unsigned anyNonZero128(uint16x8_t a) {
// CHECK: anyNonZero128:
// CHECK: umaxv.16b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: movz w0, #0
if (vmaxvq_u8(a))
return bar();
return 0;
}
// Branch if all lanes of V0 are zero; 64 bit => !max
unsigned allZero64(uint16x4_t a) {
// CHECK: allZero64:
// CHECK: umaxv.8b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: b {{_bar|bar}}
if (!vmaxv_u8(a))
return bar();
return 0;
}
// Branch if all lanes of V0 are zero; 128 bit => !max
unsigned allZero128(uint16x8_t a) {
// CHECK: allZero128:
// CHECK: umaxv.16b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: b {{_bar|bar}}
if (!vmaxvq_u8(a))
return bar();
return 0;
}
// Branch if all lanes of V0 are non-zero; 64 bit => min
unsigned allNonZero64(uint16x4_t a) {
// CHECK: allNonZero64:
// CHECK: uminv.8b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: movz w0, #0
if (vminv_u8(a))
return bar();
return 0;
}
// Branch if all lanes of V0 are non-zero; 128 bit => min
unsigned allNonZero128(uint16x8_t a) {
// CHECK: allNonZero128:
// CHECK: uminv.16b b[[REGNO1:[0-9]+]], v0
// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
// CHECK: [[LABEL]]:
// CHECK-NEXT: movz w0, #0
if (vminvq_u8(a))
return bar();
return 0;
}
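// The idiom these functions exercise, written out directly (illustrative,
// not part of the test): a cross-lane min/max reduction turns a vector
// predicate into a single scalar that the compiler can branch on.
static inline unsigned any_lane_nonzero(uint8x8_t v) {
  return vmaxv_u8(v) != 0; // max over lanes is nonzero iff some lane is
}
static inline unsigned all_lanes_nonzero(uint8x8_t v) {
  return vminv_u8(v) != 0; // min over lanes is nonzero iff every lane is
}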

View File

@ -0,0 +1,239 @@
// RUN: %clang -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 extract intrinsics
// This file can be used as a backend test by adding a RUN line with
// -check-prefix=CHECK-CODEGEN on the FileCheck invocation
#include <arm_neon.h>
void test_vext_s8()
{
// CHECK: test_vext_s8
int8x8_t xS8x8;
xS8x8 = vext_s8(xS8x8, xS8x8, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_s8:
// CHECK-CODEGEN: {{ext.8.*#1}}
}
void test_vext_u8()
{
// CHECK: test_vext_u8
uint8x8_t xU8x8;
xU8x8 = vext_u8(xU8x8, xU8x8, 2);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_u8:
// CHECK-CODEGEN: {{ext.8.*#2}}
}
void test_vext_p8()
{
// CHECK: test_vext_p8
poly8x8_t xP8x8;
xP8x8 = vext_p8(xP8x8, xP8x8, 3);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_p8:
// CHECK-CODEGEN: {{ext.8.*#3}}
}
void test_vext_s16()
{
// CHECK: test_vext_s16
int16x4_t xS16x4;
xS16x4 = vext_s16(xS16x4, xS16x4, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_s16:
// CHECK-CODEGEN: {{ext.8.*#2}}
}
void test_vext_u16()
{
// CHECK: test_vext_u16
uint16x4_t xU16x4;
xU16x4 = vext_u16(xU16x4, xU16x4, 2);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_u16:
// CHECK-CODEGEN: {{ext.8.*#4}}
}
void test_vext_p16()
{
// CHECK: test_vext_p16
poly16x4_t xP16x4;
xP16x4 = vext_p16(xP16x4, xP16x4, 3);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_p16:
// CHECK-CODEGEN: {{ext.8.*#6}}
}
void test_vext_s32()
{
// CHECK: test_vext_s32
int32x2_t xS32x2;
xS32x2 = vext_s32(xS32x2, xS32x2, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_s32:
// CHECK-CODEGEN: {{ext.8.*#4}}
}
void test_vext_u32()
{
// CHECK: test_vext_u32
uint32x2_t xU32x2;
xU32x2 = vext_u32(xU32x2, xU32x2, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_u32:
// CHECK-CODEGEN: {{ext.8.*#4}}
}
void test_vext_f32()
{
// CHECK: test_vext_f32
float32x2_t xF32x2;
xF32x2 = vext_f32(xF32x2, xF32x2, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_f32:
// CHECK-CODEGEN: {{ext.8.*#4}}
}
void test_vext_s64()
{
// CHECK: test_vext_s64
int64x1_t xS64x1;
// FIXME: don't use 1 as the index or check it for now; clang has a bug?
xS64x1 = vext_s64(xS64x1, xS64x1, /*1*/0);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_s64:
// CHECK_FIXME: {{ext.8.*#0}}
}
void test_vext_u64()
{
// CHECK: test_vext_u64
uint64x1_t xU64x1;
// FIXME: don't use 1 as the index or check it for now; clang has a bug?
xU64x1 = vext_u64(xU64x1, xU64x1, /*1*/0);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vext_u64:
// CHECK_FIXME: {{ext.8.*#0}}
}
void test_vextq_s8()
{
// CHECK: test_vextq_s8
int8x16_t xS8x16;
xS8x16 = vextq_s8(xS8x16, xS8x16, 4);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_s8:
// CHECK-CODEGEN: {{ext.16.*#4}}
}
void test_vextq_u8()
{
// CHECK: test_vextq_u8
uint8x16_t xU8x16;
xU8x16 = vextq_u8(xU8x16, xU8x16, 5);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_u8:
// CHECK-CODEGEN: {{ext.16.*#5}}
}
void test_vextq_p8()
{
// CHECK: test_vextq_p8
poly8x16_t xP8x16;
xP8x16 = vextq_p8(xP8x16, xP8x16, 6);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_p8:
// CHECK-CODEGEN: {{ext.16.*#6}}
}
void test_vextq_s16()
{
// CHECK: test_vextq_s16
int16x8_t xS16x8;
xS16x8 = vextq_s16(xS16x8, xS16x8, 7);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_s16:
// CHECK-CODEGEN: {{ext.16.*#14}}
}
void test_vextq_u16()
{
// CHECK: test_vextq_u16
uint16x8_t xU16x8;
xU16x8 = vextq_u16(xU16x8, xU16x8, 4);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_u16:
// CHECK-CODEGEN: {{ext.16.*#8}}
}
void test_vextq_p16()
{
// CHECK: test_vextq_p16
poly16x8_t xP16x8;
xP16x8 = vextq_p16(xP16x8, xP16x8, 5);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_p16:
// CHECK-CODEGEN: {{ext.16.*#10}}
}
void test_vextq_s32()
{
// CHECK: test_vextq_s32
int32x4_t xS32x4;
xS32x4 = vextq_s32(xS32x4, xS32x4, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_s32:
// CHECK-CODEGEN: {{ext.16.*#4}}
}
void test_vextq_u32()
{
// CHECK: test_vextq_u32
uint32x4_t xU32x4;
xU32x4 = vextq_u32(xU32x4, xU32x4, 2);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_u32:
// CHECK-CODEGEN: {{ext.16.*#8}}
}
void test_vextq_f32()
{
// CHECK: test_vextq_f32
float32x4_t xF32x4;
xF32x4 = vextq_f32(xF32x4, xF32x4, 3);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_f32:
// CHECK-CODEGEN: {{ext.16.*#12}}
}
void test_vextq_s64()
{
// CHECK: test_vextq_s64
int64x2_t xS64x2;
xS64x2 = vextq_s64(xS64x2, xS64x2, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_s64:
// CHECK-CODEGEN: {{ext.16.*#8}}
}
void test_vextq_u64()
{
// CHECK: test_vextq_u64
uint64x2_t xU64x2;
xU64x2 = vextq_u64(xU64x2, xU64x2, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_u64:
// CHECK-CODEGEN: {{ext.16.*#8}}
}
void test_vextq_f64()
{
// CHECK: test_vextq_f64
float64x2_t xF64x2;
xF64x2 = vextq_f64(xF64x2, xF64x2, 1);
// CHECK: shufflevector
// CHECK-CODEGEN: test_vextq_f64:
// CHECK-CODEGEN: {{ext.16.*#8}}
}
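// A note on the immediates checked above (illustrative): the #imm on
// ext.8/ext.16 is a byte offset, i.e. the lane index scaled by the element
// size, which is why vext_s16(..., 3) expects #6 and vextq_u32(..., 2)
// expects #8.
static inline int16x4_t vext_byte_offset_sketch(int16x4_t a, int16x4_t b) {
  return vext_s16(a, b, 3); // takes bytes 6..13 of the concatenation {a, b}
}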

View File

@ -0,0 +1,136 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD fused multiply add intrinsics
#include <arm_neon.h>
float32x2_t test_vfma_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
// CHECK: test_vfma_f32
return vfma_f32(a1, a2, a3);
// CHECK: llvm.fma.v2f32({{.*a2, .*a3, .*a1}})
// CHECK-NEXT: ret
}
float32x4_t test_vfmaq_f32(float32x4_t a1, float32x4_t a2, float32x4_t a3) {
// CHECK: test_vfmaq_f32
return vfmaq_f32(a1, a2, a3);
// CHECK: llvm.fma.v4f32({{.*a2, .*a3, .*a1}})
// CHECK-NEXT: ret
}
float64x2_t test_vfmaq_f64(float64x2_t a1, float64x2_t a2, float64x2_t a3) {
// CHECK: test_vfmaq_f64
return vfmaq_f64(a1, a2, a3);
// CHECK: llvm.fma.v2f64({{.*a2, .*a3, .*a1}})
// CHECK-NEXT: ret
}
float32x2_t test_vfma_lane_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
// CHECK: test_vfma_lane_f32
return vfma_lane_f32(a1, a2, a3, 1);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 1 (usually a shufflevector)
// CHECK: llvm.fma.v2f32(<2 x float> %a2, <2 x float> {{.*}}, <2 x float> %a1)
// CHECK-NEXT: ret
}
float32x4_t test_vfmaq_lane_f32(float32x4_t a1, float32x4_t a2, float32x2_t a3) {
// CHECK: test_vfmaq_lane_f32
return vfmaq_lane_f32(a1, a2, a3, 1);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 1 (usually a shufflevector)
// CHECK: llvm.fma.v4f32(<4 x float> %a2, <4 x float> {{.*}}, <4 x float> %a1)
// CHECK-NEXT: ret
}
float64x2_t test_vfmaq_lane_f64(float64x2_t a1, float64x2_t a2, float64x1_t a3) {
// CHECK: test_vfmaq_lane_f64
return vfmaq_lane_f64(a1, a2, a3, 0);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 0 (usually a shufflevector)
// CHECK: llvm.fma.v2f64(<2 x double> %a2, <2 x double> {{.*}}, <2 x double> %a1)
// CHECK-NEXT: ret
}
float32x2_t test_vfma_n_f32(float32x2_t a1, float32x2_t a2, float32_t a3) {
// CHECK: test_vfma_n_f32
return vfma_n_f32(a1, a2, a3);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 0 (usually two insertelements)
// CHECK: llvm.fma.v2f32
// CHECK-NEXT: ret
}
float32x4_t test_vfmaq_n_f32(float32x4_t a1, float32x4_t a2, float32_t a3) {
// CHECK: test_vfmaq_n_f32
return vfmaq_n_f32(a1, a2, a3);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 0 (usually four insertelements)
// CHECK: llvm.fma.v4f32
// CHECK-NEXT: ret
}
float64x2_t test_vfmaq_n_f64(float64x2_t a1, float64x2_t a2, float64_t a3) {
// CHECK: test_vfmaq_n_f64
return vfmaq_n_f64(a1, a2, a3);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 0 (usually two insertelements)
// CHECK: llvm.fma.v2f64
// CHECK-NEXT: ret
}
float32x2_t test_vfms_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
// CHECK: test_vfms_f32
return vfms_f32(a1, a2, a3);
// CHECK: [[NEG:%.*]] = fsub <2 x float> {{.*}}, %a2
// CHECK: llvm.fma.v2f32(<2 x float> %a3, <2 x float> [[NEG]], <2 x float> %a1)
// CHECK-NEXT: ret
}
float32x4_t test_vfmsq_f32(float32x4_t a1, float32x4_t a2, float32x4_t a3) {
// CHECK: test_vfmsq_f32
return vfmsq_f32(a1, a2, a3);
// CHECK: [[NEG:%.*]] = fsub <4 x float> {{.*}}, %a2
// CHECK: llvm.fma.v4f32(<4 x float> %a3, <4 x float> [[NEG]], <4 x float> %a1)
// CHECK-NEXT: ret
}
float64x2_t test_vfmsq_f64(float64x2_t a1, float64x2_t a2, float64x2_t a3) {
// CHECK: test_vfmsq_f64
return vfmsq_f64(a1, a2, a3);
// CHECK: [[NEG:%.*]] = fsub <2 x double> {{.*}}, %a2
// CHECK: llvm.fma.v2f64(<2 x double> %a3, <2 x double> [[NEG]], <2 x double> %a1)
// CHECK-NEXT: ret
}
float32x2_t test_vfms_lane_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
// CHECK: test_vfms_lane_f32
return vfms_lane_f32(a1, a2, a3, 1);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 1 (usually a shufflevector)
// CHECK: [[NEG:%.*]] = fsub <2 x float> {{.*}}, %a3
// CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[NEG]]
// CHECK: llvm.fma.v2f32(<2 x float> {{.*}}, <2 x float> [[LANE]], <2 x float> %a1)
// CHECK-NEXT: ret
}
float32x4_t test_vfmsq_lane_f32(float32x4_t a1, float32x4_t a2, float32x2_t a3) {
// CHECK: test_vfmsq_lane_f32
return vfmsq_lane_f32(a1, a2, a3, 1);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 1 (usually a shufflevector)
// CHECK: [[NEG:%.*]] = fsub <2 x float> {{.*}}, %a3
// CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[NEG]]
// CHECK: llvm.fma.v4f32(<4 x float> {{.*}}, <4 x float> [[LANE]], <4 x float> %a1)
// CHECK-NEXT: ret
}
float64x2_t test_vfmsq_lane_f64(float64x2_t a1, float64x2_t a2, float64x1_t a3) {
// CHECK: test_vfmsq_lane_f64
return vfmsq_lane_f64(a1, a2, a3, 0);
// NB: the test below is deliberately loose, so that we don't depend too much
// upon the exact IR used to select lane 0 (usually a shufflevector)
// CHECK: [[NEG:%.*]] = fsub <1 x double> {{.*}}, %a3
// CHECK: [[LANE:%.*]] = shufflevector <1 x double> [[NEG]]
// CHECK: llvm.fma.v2f64(<2 x double> {{.*}}, <2 x double> [[LANE]], <2 x double> %a1)
// CHECK-NEXT: ret
}
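// The fsub-then-fma pattern checked in the vfms tests can be written at the
// source level too (an assumed equivalence sketch, not one of the tests):
// fused multiply-subtract is fma with one multiplicand negated.
static inline float32x2_t vfms_sketch(float32x2_t a, float32x2_t b,
                                      float32x2_t c) {
  return vfma_f32(a, vneg_f32(b), c); // a + (-b) * c, no intermediate rounding
}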

View File

@ -0,0 +1,13 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD vget intrinsics
#include <arm_neon.h>
float64_t test_vget_lane_f64(float64x1_t a1) {
// CHECK: test_vget_lane_f64
// why isn't 1 allowed as second argument?
return vget_lane_f64(a1, 0);
// CHECK: extractelement {{.*}} i32 0
// CHECK-NEXT: ret
}

View File

@ -0,0 +1,18 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD negate and saturating negate intrinsics
#include <arm_neon.h>
int64x2_t test_vnegq_s64(int64x2_t a1) {
// CHECK: test_vnegq_s64
return vnegq_s64(a1);
// CHECK: sub <2 x i64> zeroinitializer, %a1
// CHECK-NEXT: ret
}
int64x2_t test_vqnegq_s64(int64x2_t a1) {
// CHECK: test_vqnegq_s64
return vqnegq_s64(a1);
// CHECK: llvm.arm64.neon.sqneg.v2i64
// CHECK-NEXT: ret
}

View File

@ -0,0 +1,77 @@
// RUN: %clang -O3 -target arm64-apple-ios7 -ffreestanding -S -o - %s | FileCheck %s
// REQUIRES: arm64-registered-target
/// Test vqmov[u]n_high_<su>{16,32,64} ARM64 intrinsics
#include <arm_neon.h>
// vqmovn_high_s16 -> SQXTN2 Vd.16b,Vn.8h
int8x16_t test_vqmovn_high_s16(int8x8_t Vdlow, int16x8_t Vn)
{
return vqmovn_high_s16(Vdlow, Vn);
// CHECK: test_vqmovn_high_s16:
// CHECK: sqxtn2.16b {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovun_high_s16 -> SQXTUN2 Vd.16b,Vn.8h
uint8x16_t test_vqmovun_high_s16(uint8x8_t Vdlow, int16x8_t Vn)
{
return vqmovun_high_s16(Vdlow, Vn);
// CHECK: test_vqmovun_high_s16:
// CHECK: sqxtun2.16b {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovn_high_s32 -> SQXTN2 Vd.8h,Vn.4s
int16x8_t test_vqmovn_high_s32(int16x4_t Vdlow, int32x4_t Vn)
{
return vqmovn_high_s32(Vdlow, Vn);
// CHECK: test_vqmovn_high_s32:
// CHECK: sqxtn2.8h {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovn_high_u32 -> UQXTN2 Vd.8h,Vn.4s
uint16x8_t test_vqmovn_high_u32(uint16x4_t Vdlow, uint32x4_t Vn)
{
return vqmovn_high_u32(Vdlow, Vn);
// CHECK: test_vqmovn_high_u32:
// CHECK: uqxtn2.8h {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovn_high_s64 -> SQXTN2 Vd.4s,Vn.2d
int32x4_t test_vqmovn_high_s64(int32x2_t Vdlow, int64x2_t Vn)
{
return vqmovn_high_s64(Vdlow, Vn);
// CHECK: test_vqmovn_high_s64:
// CHECK: sqxtn2.4s {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovn_high_u64 -> UQXTN2 Vd.4s,Vn.2d
uint32x4_t test_vqmovn_high_u64(uint32x2_t Vdlow, uint64x2_t Vn)
{
return vqmovn_high_u64(Vdlow, Vn);
// CHECK: test_vqmovn_high_u64:
// CHECK: uqxtn2.4s {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovn_high_u16 -> UQXTN2 Vd.16b,Vn.8h
uint8x16_t test_vqmovn_high_u16(uint8x8_t Vdlow, uint16x8_t Vn)
{
return vqmovn_high_u16(Vdlow, Vn);
// CHECK: test_vqmovn_high_u16:
// CHECK: uqxtn2.16b {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovun_high_s32 -> SQXTUN2 Vd.8h,Vn.4s
uint16x8_t test_vqmovun_high_s32(uint16x4_t Vdlow, int32x4_t Vn)
{
return vqmovun_high_s32(Vdlow, Vn);
// CHECK: test_vqmovun_high_s32:
// CHECK: sqxtun2.8h {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
// vqmovun_high_s64 -> SQXTUN2 Vd.4s,Vn.2d
uint32x4_t test_vqmovun_high_s64(uint32x2_t Vdlow, int64x2_t Vn)
{
return vqmovun_high_s64(Vdlow, Vn);
// CHECK: test_vqmovun_high_s64:
// CHECK: sqxtun2.4s {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
}
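// How the *_high forms compose in practice (an assumed usage sketch): the
// plain narrow fills the low half and the _high variant fills the top half,
// so a full 128-bit pair narrows to one vector in two steps.
static inline int8x16_t narrow_pair_sketch(int16x8_t lo, int16x8_t hi) {
  return vqmovn_high_s16(vqmovn_s16(lo), hi); // SQXTN then SQXTN2
}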

View File

@ -0,0 +1,26 @@
// RUN: %clang -O3 -target arm64-apple-ios7 -ffreestanding -c -S -o - %s | FileCheck %s
// REQUIRES: arm64-registered-target
/// Test vrecpss_f32, vrecpsd_f64 ARM64 intrinsics
#include <arm_neon.h>
// vrecpss_f32 -> FRECPS Sd,Sa,Sb
//
float32_t test_vrecpss_f32(float32_t Vdlow, float32_t Vn)
{
return vrecpss_f32(Vdlow, Vn);
// CHECK: test_vrecpss_f32:
// CHECK: frecps s0, s0, s1
// CHECK-NEXT: ret
}
// vrecpsd_f64 -> FRECPS Dd,Da,Db
//
float64_t test_vrecpsd_f64(float64_t Vdlow, float64_t Vn)
{
return vrecpsd_f64(Vdlow, Vn);
// CHECK: test_vrecpsd_f64:
// CHECK: frecps d0, d0, d1
// CHECK-NEXT: ret
}
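// What frecps is for (an illustrative sketch, assuming the ACLE semantics
// vrecpss_f32(a, b) == 2 - a*b): it supplies the Newton-Raphson correction
// factor for refining a reciprocal estimate, est' = est * (2 - x*est).
static inline float32_t refine_recip_sketch(float32_t x, float32_t est) {
  return est * vrecpss_f32(x, est); // one iteration toward 1/x
}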

View File

@ -0,0 +1,31 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD set lane intrinsics INCOMPLETE
#include <arm_neon.h>
float16x4_t test_vset_lane_f16(float16_t *a1, float16x4_t a2) {
// CHECK-LABEL: test_vset_lane_f16
return vset_lane_f16(*a1, a2, 1);
// CHECK: insertelement <4 x i16> %a2, i16 %a1, i32 1
}
float16x8_t test_vsetq_lane_f16(float16_t *a1, float16x8_t a2) {
// CHECK-LABEL: test_vsetq_lane_f16
return vsetq_lane_f16(*a1, a2, 4);
// CHECK: insertelement <8 x i16> %a2, i16 %a1, i32 4
}
// Problem with scalar_to_vector in the backend; punt for now.
#if 0
float64x1_t test_vset_lane_f64(float64_t a1, float64x1_t a2) {
// CHECK-LABEL@ test_vset_lane_f64
return vset_lane_f64(a1, a2, 0);
// CHECK@ insertelement <1 x double> %a2, double %a1, i32 0
}
#endif
float64x2_t test_vsetq_lane_f64(float64_t a1, float64x2_t a2) {
// CHECK-LABEL: test_vsetq_lane_f64
return vsetq_lane_f64(a1, a2, 0);
// CHECK: insertelement <2 x double> %a2, double %a1, i32 0
}


@ -0,0 +1,357 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7.0 -ffreestanding -emit-llvm -o - -O1 %s | FileCheck %s
#include <arm_neon.h>
int8x8_t test_vqshl_n_s8(int8x8_t in) {
// CHECK-LABEL: @test_vqshl_n_s8
// CHECK: call <8 x i8> @llvm.arm64.neon.sqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
return vqshl_n_s8(in, 1);
}
int16x4_t test_vqshl_n_s16(int16x4_t in) {
// CHECK-LABEL: @test_vqshl_n_s16
// CHECK: call <4 x i16> @llvm.arm64.neon.sqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
return vqshl_n_s16(in, 1);
}
int32x2_t test_vqshl_n_s32(int32x2_t in) {
// CHECK-LABEL: @test_vqshl_n_s32
// CHECK: call <2 x i32> @llvm.arm64.neon.sqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
return vqshl_n_s32(in, 1);
}
int64x1_t test_vqshl_n_s64(int64x1_t in) {
// CHECK-LABEL: @test_vqshl_n_s64
// CHECK: call <1 x i64> @llvm.arm64.neon.sqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
return vqshl_n_s64(in, 1);
}
int8x16_t test_vqshlq_n_s8(int8x16_t in) {
// CHECK-LABEL: @test_vqshlq_n_s8
// CHECK: call <16 x i8> @llvm.arm64.neon.sqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
return vqshlq_n_s8(in, 1);
}
int16x8_t test_vqshlq_n_s16(int16x8_t in) {
// CHECK-LABEL: @test_vqshlq_n_s16
// CHECK: call <8 x i16> @llvm.arm64.neon.sqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
return vqshlq_n_s16(in, 1);
}
int32x4_t test_vqshlq_n_s32(int32x4_t in) {
// CHECK-LABEL: @test_vqshlq_n_s32
// CHECK: call <4 x i32> @llvm.arm64.neon.sqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
return vqshlq_n_s32(in, 1);
}
int64x2_t test_vqshlq_n_s64(int64x2_t in) {
// CHECK-LABEL: @test_vqshlq_n_s64
// CHECK: call <2 x i64> @llvm.arm64.neon.sqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>)
return vqshlq_n_s64(in, 1);
}
uint8x8_t test_vqshl_n_u8(uint8x8_t in) {
// CHECK-LABEL: @test_vqshl_n_u8
// CHECK: call <8 x i8> @llvm.arm64.neon.uqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
return vqshl_n_u8(in, 1);
}
uint16x4_t test_vqshl_n_u16(uint16x4_t in) {
// CHECK-LABEL: @test_vqshl_n_u16
// CHECK: call <4 x i16> @llvm.arm64.neon.uqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
return vqshl_n_u16(in, 1);
}
uint32x2_t test_vqshl_n_u32(uint32x2_t in) {
// CHECK-LABEL: @test_vqshl_n_u32
// CHECK: call <2 x i32> @llvm.arm64.neon.uqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
return vqshl_n_u32(in, 1);
}
uint64x1_t test_vqshl_n_u64(uint64x1_t in) {
// CHECK-LABEL: @test_vqshl_n_u64
// CHECK: call <1 x i64> @llvm.arm64.neon.uqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
return vqshl_n_u64(in, 1);
}
uint8x16_t test_vqshlq_n_u8(uint8x16_t in) {
// CHECK-LABEL: @test_vqshlq_n_u8
// CHECK: call <16 x i8> @llvm.arm64.neon.uqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
return vqshlq_n_u8(in, 1);
}
uint16x8_t test_vqshlq_n_u16(uint16x8_t in) {
// CHECK-LABEL: @test_vqshlq_n_u16
// CHECK: call <8 x i16> @llvm.arm64.neon.uqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
return vqshlq_n_u16(in, 1);
}
uint32x4_t test_vqshlq_n_u32(uint32x4_t in) {
// CHECK-LABEL: @test_vqshlq_n_u32
// CHECK: call <4 x i32> @llvm.arm64.neon.uqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
return vqshlq_n_u32(in, 1);
}
uint64x2_t test_vqshlq_n_u64(uint64x2_t in) {
// CHECK-LABEL: @test_vqshlq_n_u64
// CHECK: call <2 x i64> @llvm.arm64.neon.uqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>)
return vqshlq_n_u64(in, 1);
}
int8x8_t test_vrshr_n_s8(int8x8_t in) {
// CHECK-LABEL: @test_vrshr_n_s8
// CHECK: call <8 x i8> @llvm.arm64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
return vrshr_n_s8(in, 1);
}
int16x4_t test_vrshr_n_s16(int16x4_t in) {
// CHECK-LABEL: @test_vrshr_n_s16
// CHECK: call <4 x i16> @llvm.arm64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
return vrshr_n_s16(in, 1);
}
int32x2_t test_vrshr_n_s32(int32x2_t in) {
// CHECK-LABEL: @test_vrshr_n_s32
// CHECK: call <2 x i32> @llvm.arm64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
return vrshr_n_s32(in, 1);
}
int64x1_t test_vrshr_n_s64(int64x1_t in) {
// CHECK-LABEL: @test_vrshr_n_s64
// CHECK: call <1 x i64> @llvm.arm64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
return vrshr_n_s64(in, 1);
}
int8x16_t test_vrshrq_n_s8(int8x16_t in) {
// CHECK-LABEL: @test_vrshrq_n_s8
// CHECK: call <16 x i8> @llvm.arm64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
return vrshrq_n_s8(in, 1);
}
int16x8_t test_vrshrq_n_s16(int16x8_t in) {
// CHECK-LABEL: @test_vrshrq_n_s16
// CHECK: call <8 x i16> @llvm.arm64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
return vrshrq_n_s16(in, 1);
}
int32x4_t test_vrshrq_n_s32(int32x4_t in) {
// CHECK-LABEL: @test_vrshrq_n_s32
// CHECK: call <4 x i32> @llvm.arm64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
return vrshrq_n_s32(in, 1);
}
int64x2_t test_vrshrq_n_s64(int64x2_t in) {
// CHECK-LABEL: @test_vrshrq_n_s64
// CHECK: call <2 x i64> @llvm.arm64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
return vrshrq_n_s64(in, 1);
}
uint8x8_t test_vrshr_n_u8(uint8x8_t in) {
// CHECK-LABEL: @test_vrshr_n_u8
// CHECK: call <8 x i8> @llvm.arm64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
return vrshr_n_u8(in, 1);
}
uint16x4_t test_vrshr_n_u16(uint16x4_t in) {
// CHECK-LABEL: @test_vrshr_n_u16
// CHECK: call <4 x i16> @llvm.arm64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
return vrshr_n_u16(in, 1);
}
uint32x2_t test_vrshr_n_u32(uint32x2_t in) {
// CHECK-LABEL: @test_vrshr_n_u32
// CHECK: call <2 x i32> @llvm.arm64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
return vrshr_n_u32(in, 1);
}
uint64x1_t test_vrshr_n_u64(uint64x1_t in) {
// CHECK-LABEL: @test_vrshr_n_u64
// CHECK: call <1 x i64> @llvm.arm64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
return vrshr_n_u64(in, 1);
}
uint8x16_t test_vrshrq_n_u8(uint8x16_t in) {
// CHECK-LABEL: @test_vrshrq_n_u8
// CHECK: call <16 x i8> @llvm.arm64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
return vrshrq_n_u8(in, 1);
}
uint16x8_t test_vrshrq_n_u16(uint16x8_t in) {
// CHECK-LABEL: @test_vrshrq_n_u16
// CHECK: call <8 x i16> @llvm.arm64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
return vrshrq_n_u16(in, 1);
}
uint32x4_t test_vrshrq_n_u32(uint32x4_t in) {
// CHECK-LABEL: @test_vrshrq_n_u32
// CHECK: call <4 x i32> @llvm.arm64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
return vrshrq_n_u32(in, 1);
}
uint64x2_t test_vrshrq_n_u64(uint64x2_t in) {
// CHECK-LABEL: @test_vrshrq_n_u64
// CHECK: call <2 x i64> @llvm.arm64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
return vrshrq_n_u64(in, 1);
}
int8x8_t test_vqshlu_n_s8(int8x8_t in) {
// CHECK-LABEL: @test_vqshlu_n_s8
// CHECK: call <8 x i8> @llvm.arm64.neon.sqshlu.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
return vqshlu_n_s8(in, 1);
}
int16x4_t test_vqshlu_n_s16(int16x4_t in) {
// CHECK-LABEL: @test_vqshlu_n_s16
// CHECK: call <4 x i16> @llvm.arm64.neon.sqshlu.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
return vqshlu_n_s16(in, 1);
}
int32x2_t test_vqshlu_n_s32(int32x2_t in) {
// CHECK-LABEL: @test_vqshlu_n_s32
// CHECK: call <2 x i32> @llvm.arm64.neon.sqshlu.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
return vqshlu_n_s32(in, 1);
}
int64x1_t test_vqshlu_n_s64(int64x1_t in) {
// CHECK-LABEL: @test_vqshlu_n_s64
// CHECK: call <1 x i64> @llvm.arm64.neon.sqshlu.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
return vqshlu_n_s64(in, 1);
}
int8x16_t test_vqshluq_n_s8(int8x16_t in) {
// CHECK-LABEL: @test_vqshluq_n_s8
// CHECK: call <16 x i8> @llvm.arm64.neon.sqshlu.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
return vqshluq_n_s8(in, 1);
}
int16x8_t test_vqshluq_n_s16(int16x8_t in) {
// CHECK-LABEL: @test_vqshluq_n_s16
// CHECK: call <8 x i16> @llvm.arm64.neon.sqshlu.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
return vqshluq_n_s16(in, 1);
}
int32x4_t test_vqshluq_n_s32(int32x4_t in) {
// CHECK-LABEL: @test_vqshluq_n_s32
// CHECK: call <4 x i32> @llvm.arm64.neon.sqshlu.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
return vqshluq_n_s32(in, 1);
}
int64x2_t test_vqshluq_n_s64(int64x2_t in) {
// CHECK-LABEL: @test_vqshluq_n_s64
// CHECK: call <2 x i64> @llvm.arm64.neon.sqshlu.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>)
return vqshluq_n_s64(in, 1);
}
int8x8_t test_vrsra_n_s8(int8x8_t acc, int8x8_t in) {
// CHECK-LABEL: @test_vrsra_n_s8
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.arm64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
// CHECK: add <8 x i8> [[TMP]], %acc
return vrsra_n_s8(acc, in, 1);
}
int16x4_t test_vrsra_n_s16(int16x4_t acc, int16x4_t in) {
// CHECK-LABEL: @test_vrsra_n_s16
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.arm64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
// CHECK: add <4 x i16> [[TMP]], %acc
return vrsra_n_s16(acc, in, 1);
}
int32x2_t test_vrsra_n_s32(int32x2_t acc, int32x2_t in) {
// CHECK-LABEL: @test_vrsra_n_s32
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.arm64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
// CHECK: add <2 x i32> [[TMP]], %acc
return vrsra_n_s32(acc, in, 1);
}
int64x1_t test_vrsra_n_s64(int64x1_t acc, int64x1_t in) {
// CHECK-LABEL: @test_vrsra_n_s64
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.arm64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
// CHECK: add <1 x i64> [[TMP]], %acc
return vrsra_n_s64(acc, in, 1);
}
int8x16_t test_vrsraq_n_s8(int8x16_t acc, int8x16_t in) {
// CHECK-LABEL: @test_vrsraq_n_s8
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.arm64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
// CHECK: add <16 x i8> [[TMP]], %acc
return vrsraq_n_s8(acc, in, 1);
}
int16x8_t test_vrsraq_n_s16(int16x8_t acc, int16x8_t in) {
// CHECK-LABEL: @test_vrsraq_n_s16
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.arm64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
// CHECK: add <8 x i16> [[TMP]], %acc
return vrsraq_n_s16(acc, in, 1);
}
int32x4_t test_vrsraq_n_s32(int32x4_t acc, int32x4_t in) {
// CHECK-LABEL: @test_vrsraq_n_s32
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.arm64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
// CHECK: add <4 x i32> [[TMP]], %acc
return vrsraq_n_s32(acc, in, 1);
}
int64x2_t test_vrsraq_n_s64(int64x2_t acc, int64x2_t in) {
// CHECK-LABEL: @test_vrsraq_n_s64
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.arm64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
// CHECK: add <2 x i64> [[TMP]], %acc
return vrsraq_n_s64(acc, in, 1);
}
uint8x8_t test_vrsra_n_u8(uint8x8_t acc, uint8x8_t in) {
// CHECK-LABEL: @test_vrsra_n_u8
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.arm64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
// CHECK: add <8 x i8> [[TMP]], %acc
return vrsra_n_u8(acc, in, 1);
}
uint16x4_t test_vrsra_n_u16(uint16x4_t acc, uint16x4_t in) {
// CHECK-LABEL: @test_vrsra_n_u16
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.arm64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
// CHECK: add <4 x i16> [[TMP]], %acc
return vrsra_n_u16(acc, in, 1);
}
uint32x2_t test_vrsra_n_u32(uint32x2_t acc, uint32x2_t in) {
// CHECK-LABEL: @test_vrsra_n_u32
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.arm64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
// CHECK: add <2 x i32> [[TMP]], %acc
return vrsra_n_u32(acc, in, 1);
}
uint64x1_t test_vrsra_n_u64(uint64x1_t acc, uint64x1_t in) {
// CHECK-LABEL: @test_vrsra_n_u64
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.arm64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
// CHECK: add <1 x i64> [[TMP]], %acc
return vrsra_n_u64(acc, in, 1);
}
uint8x16_t test_vrsraq_n_u8(uint8x16_t acc, uint8x16_t in) {
// CHECK-LABEL: @test_vrsraq_n_u8
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.arm64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
// CHECK: add <16 x i8> [[TMP]], %acc
return vrsraq_n_u8(acc, in, 1);
}
uint16x8_t test_vrsraq_n_u16(uint16x8_t acc, uint16x8_t in) {
// CHECK-LABEL: @test_vrsraq_n_u16
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.arm64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
// CHECK: add <8 x i16> [[TMP]], %acc
return vrsraq_n_u16(acc, in, 1);
}
uint32x4_t test_vrsraq_n_u32(uint32x4_t acc, uint32x4_t in) {
// CHECK-LABEL: @test_vrsraq_n_u32
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.arm64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
// CHECK: add <4 x i32> [[TMP]], %acc
return vrsraq_n_u32(acc, in, 1);
}
uint64x2_t test_vrsraq_n_u64(uint64x2_t acc, uint64x2_t in) {
// CHECK-LABEL: @test_vrsraq_n_u64
// CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.arm64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
// CHECK: add <2 x i64> [[TMP]], %acc
return vrsraq_n_u64(acc, in, 1);
}


@ -0,0 +1,148 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - %s | \
// RUN: FileCheck -check-prefix=CHECK_CODEGEN %s
// REQUIRES: arm64-registered-target
// Test ARM64 SIMD vector shift left and insert: vsli[q]_n_*
#include <arm_neon.h>
int8x8_t test_vsli_n_s8(int8x8_t a1, int8x8_t a2) {
// CHECK: test_vsli_n_s8
return vsli_n_s8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v8i8
// CHECK_CODEGEN: sli.8b v0, v1, #3
}
int16x4_t test_vsli_n_s16(int16x4_t a1, int16x4_t a2) {
// CHECK: test_vsli_n_s16
return vsli_n_s16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v4i16
// CHECK_CODEGEN: sli.4h v0, v1, #3
}
int32x2_t test_vsli_n_s32(int32x2_t a1, int32x2_t a2) {
// CHECK: test_vsli_n_s32
return vsli_n_s32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v2i32
// CHECK_CODEGEN: sli.2s v0, v1, #1
}
int64x1_t test_vsli_n_s64(int64x1_t a1, int64x1_t a2) {
// CHECK: test_vsli_n_s64
return vsli_n_s64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v1i64
// CHECK_CODEGEN: sli d0, d1, #1
}
uint8x8_t test_vsli_n_u8(uint8x8_t a1, uint8x8_t a2) {
// CHECK: test_vsli_n_u8
return vsli_n_u8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v8i8
// CHECK_CODEGEN: sli.8b v0, v1, #3
}
uint16x4_t test_vsli_n_u16(uint16x4_t a1, uint16x4_t a2) {
// CHECK: test_vsli_n_u16
return vsli_n_u16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v4i16
// CHECK_CODEGEN: sli.4h v0, v1, #3
}
uint32x2_t test_vsli_n_u32(uint32x2_t a1, uint32x2_t a2) {
// CHECK: test_vsli_n_u32
return vsli_n_u32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v2i32
// CHECK_CODEGEN: sli.2s v0, v1, #1
}
uint64x1_t test_vsli_n_u64(uint64x1_t a1, uint64x1_t a2) {
// CHECK: test_vsli_n_u64
return vsli_n_u64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v1i64
// CHECK_CODEGEN: sli d0, d1, #1
}
poly8x8_t test_vsli_n_p8(poly8x8_t a1, poly8x8_t a2) {
// CHECK: test_vsli_n_p8
return vsli_n_p8(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v8i8
// CHECK_CODEGEN: sli.8b v0, v1, #1
}
poly16x4_t test_vsli_n_p16(poly16x4_t a1, poly16x4_t a2) {
// CHECK: test_vsli_n_p16
return vsli_n_p16(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v4i16
// CHECK_CODEGEN: sli.4h v0, v1, #1
}
int8x16_t test_vsliq_n_s8(int8x16_t a1, int8x16_t a2) {
// CHECK: test_vsliq_n_s8
return vsliq_n_s8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v16i8
// CHECK_CODEGEN: sli.16b v0, v1, #3
}
int16x8_t test_vsliq_n_s16(int16x8_t a1, int16x8_t a2) {
// CHECK: test_vsliq_n_s16
return vsliq_n_s16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v8i16
// CHECK_CODEGEN: sli.8h v0, v1, #3
}
int32x4_t test_vsliq_n_s32(int32x4_t a1, int32x4_t a2) {
// CHECK: test_vsliq_n_s32
return vsliq_n_s32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v4i32
// CHECK_CODEGEN: sli.4s v0, v1, #1
}
int64x2_t test_vsliq_n_s64(int64x2_t a1, int64x2_t a2) {
// CHECK: test_vsliq_n_s64
return vsliq_n_s64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v2i64
// CHECK_CODEGEN: sli.2d v0, v1, #1
}
uint8x16_t test_vsliq_n_u8(uint8x16_t a1, uint8x16_t a2) {
// CHECK: test_vsliq_n_u8
return vsliq_n_u8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v16i8
// CHECK_CODEGEN: sli.16b v0, v1, #3
}
uint16x8_t test_vsliq_n_u16(uint16x8_t a1, uint16x8_t a2) {
// CHECK: test_vsliq_n_u16
return vsliq_n_u16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsli.v8i16
// CHECK_CODEGEN: sli.8h v0, v1, #3
}
uint32x4_t test_vsliq_n_u32(uint32x4_t a1, uint32x4_t a2) {
// CHECK: test_vsliq_n_u32
return vsliq_n_u32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v4i32
// CHECK_CODEGEN: sli.4s v0, v1, #1
}
uint64x2_t test_vsliq_n_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vsliq_n_u64
return vsliq_n_u64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v2i64
// CHECK_CODEGEN: sli.2d v0, v1, #1
}
poly8x16_t test_vsliq_n_p8(poly8x16_t a1, poly8x16_t a2) {
// CHECK: test_vsliq_n_p8
return vsliq_n_p8(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v16i8
// CHECK_CODEGEN: sli.16b v0, v1, #1
}
poly16x8_t test_vsliq_n_p16(poly16x8_t a1, poly16x8_t a2) {
// CHECK: test_vsliq_n_p16
return vsliq_n_p16(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsli.v8i16
// CHECK_CODEGEN: sli.8h v0, v1, #1
}


@ -0,0 +1,149 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - %s | \
// RUN: FileCheck -check-prefix=CHECK_CODEGEN %s
// REQUIRES: arm64-registered-target
// Test ARM64 SIMD vector shift right and insert: vsri[q]_n_*
#include <arm_neon.h>
int8x8_t test_vsri_n_s8(int8x8_t a1, int8x8_t a2) {
// CHECK: test_vsri_n_s8
return vsri_n_s8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v8i8
// CHECK_CODEGEN: sri.8b v0, v1, #3
}
int16x4_t test_vsri_n_s16(int16x4_t a1, int16x4_t a2) {
// CHECK: test_vsri_n_s16
return vsri_n_s16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v4i16
// CHECK_CODEGEN: sri.4h v0, v1, #3
}
int32x2_t test_vsri_n_s32(int32x2_t a1, int32x2_t a2) {
// CHECK: test_vsri_n_s32
return vsri_n_s32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v2i32
// CHECK_CODEGEN: sri.2s v0, v1, #1
}
int64x1_t test_vsri_n_s64(int64x1_t a1, int64x1_t a2) {
// CHECK: test_vsri_n_s64
return vsri_n_s64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v1i64
// CHECK_CODEGEN: sri d0, d1, #1
}
uint8x8_t test_vsri_n_u8(uint8x8_t a1, uint8x8_t a2) {
// CHECK: test_vsri_n_u8
return vsri_n_u8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v8i8
// CHECK_CODEGEN: sri.8b v0, v1, #3
}
uint16x4_t test_vsri_n_u16(uint16x4_t a1, uint16x4_t a2) {
// CHECK: test_vsri_n_u16
return vsri_n_u16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v4i16
// CHECK_CODEGEN: sri.4h v0, v1, #3
}
uint32x2_t test_vsri_n_u32(uint32x2_t a1, uint32x2_t a2) {
// CHECK: test_vsri_n_u32
return vsri_n_u32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v2i32
// CHECK_CODEGEN: sri.2s v0, v1, #1
}
uint64x1_t test_vsri_n_u64(uint64x1_t a1, uint64x1_t a2) {
// CHECK: test_vsri_n_u64
return vsri_n_u64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v1i64
// CHECK_CODEGEN: sri d0, d1, #1
}
poly8x8_t test_vsri_n_p8(poly8x8_t a1, poly8x8_t a2) {
// CHECK: test_vsri_n_p8
return vsri_n_p8(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v8i8
// CHECK_CODEGEN: sri.8b v0, v1, #1
}
poly16x4_t test_vsri_n_p16(poly16x4_t a1, poly16x4_t a2) {
// CHECK: test_vsri_n_p16
return vsri_n_p16(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v4i16
// CHECK_CODEGEN: sri.4h v0, v1, #1
}
int8x16_t test_vsriq_n_s8(int8x16_t a1, int8x16_t a2) {
// CHECK: test_vsriq_n_s8
return vsriq_n_s8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v16i8
// CHECK_CODEGEN: sri.16b v0, v1, #3
}
int16x8_t test_vsriq_n_s16(int16x8_t a1, int16x8_t a2) {
// CHECK: test_vsriq_n_s16
return vsriq_n_s16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v8i16
// CHECK_CODEGEN: sri.8h v0, v1, #3
}
int32x4_t test_vsriq_n_s32(int32x4_t a1, int32x4_t a2) {
// CHECK: test_vsriq_n_s32
return vsriq_n_s32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v4i32
// CHECK_CODEGEN: sri.4s v0, v1, #1
}
int64x2_t test_vsriq_n_s64(int64x2_t a1, int64x2_t a2) {
// CHECK: test_vsriq_n_s64
return vsriq_n_s64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v2i64
// CHECK_CODEGEN: sri.2d v0, v1, #1
}
uint8x16_t test_vsriq_n_u8(uint8x16_t a1, uint8x16_t a2) {
// CHECK: test_vsriq_n_u8
return vsriq_n_u8(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v16i8
// CHECK_CODEGEN: sri.16b v0, v1, #3
}
uint16x8_t test_vsriq_n_u16(uint16x8_t a1, uint16x8_t a2) {
// CHECK: test_vsriq_n_u16
return vsriq_n_u16(a1, a2, 3);
// CHECK: llvm.arm64.neon.vsri.v8i16
// CHECK_CODEGEN: sri.8h v0, v1, #3
}
uint32x4_t test_vsriq_n_u32(uint32x4_t a1, uint32x4_t a2) {
// CHECK: test_vsriq_n_u32
return vsriq_n_u32(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v4i32
// CHECK_CODEGEN: sri.4s v0, v1, #1
}
uint64x2_t test_vsriq_n_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vsriq_n_u64
return vsriq_n_u64(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v2i64
// CHECK_CODEGEN: sri.2d v0, v1, #1
}
poly8x16_t test_vsriq_n_p8(poly8x16_t a1, poly8x16_t a2) {
// CHECK: test_vsriq_n_p8
return vsriq_n_p8(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v16i8
// CHECK_CODEGEN: sri.16b v0, v1, #1
}
poly16x8_t test_vsriq_n_p16(poly16x8_t a1, poly16x8_t a2) {
// CHECK: test_vsriq_n_p16
return vsriq_n_p16(a1, a2, 1);
// CHECK: llvm.arm64.neon.vsri.v8i16
// CHECK_CODEGEN: sri.8h v0, v1, #1
}


@ -0,0 +1,22 @@
// RUN: %clang -O1 -target arm64-apple-ios7 -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
// Test ARM64 SIMD comparison test intrinsics
#include <arm_neon.h>
uint64x2_t test_vtstq_s64(int64x2_t a1, int64x2_t a2) {
// CHECK: test_vtstq_s64
return vtstq_s64(a1, a2);
// CHECK: [[COMMONBITS:%[A-Za-z0-9.]+]] = and <2 x i64> %a1, %a2
// CHECK: [[MASK:%[A-Za-z0-9.]+]] = icmp ne <2 x i64> [[COMMONBITS]], zeroinitializer
// CHECK: [[RES:%[A-Za-z0-9.]+]] = sext <2 x i1> [[MASK]] to <2 x i64>
// CHECK: ret <2 x i64> [[RES]]
}
uint64x2_t test_vtstq_u64(uint64x2_t a1, uint64x2_t a2) {
// CHECK: test_vtstq_u64
return vtstq_u64(a1, a2);
// CHECK: [[COMMONBITS:%[A-Za-z0-9.]+]] = and <2 x i64> %a1, %a2
// CHECK: [[MASK:%[A-Za-z0-9.]+]] = icmp ne <2 x i64> [[COMMONBITS]], zeroinitializer
// CHECK: [[RES:%[A-Za-z0-9.]+]] = sext <2 x i1> [[MASK]] to <2 x i64>
// CHECK: ret <2 x i64> [[RES]]
}


@ -0,0 +1,45 @@
// RUN: %clang_cc1 -triple arm64-apple-ios -emit-llvm -o - %s | FileCheck %s
// rdar://9167275
int t1()
{
int x;
__asm__("mov %0, 7" : "=r" (x));
return x;
}
long t2()
{
long x;
__asm__("mov %0, 7" : "=r" (x));
return x;
}
long t3()
{
long x;
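// The 'w' template modifier prints the 32-bit (w-register) name of the operand.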
__asm__("mov %w0, 7" : "=r" (x));
return x;
}
// rdar://9281206
void t4(long op) {
long x1;
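// Exercises multiple register inputs together with an explicit x0 clobber.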
asm ("mov x0, %1; svc #0;" : "=r"(x1) :"r"(op),"r"(x1) :"x0" );
}
// rdar://9394290
float t5(float x) {
__asm__("fadd %0, %0, %0" : "+w" (x));
return x;
}
// rdar://9865712
void t6 (void *f, int g) {
// CHECK: t6
// CHECK: call void asm "str $1, $0", "=*Q,r"
asm("str %1, %0" : "=Q"(f) : "r"(g));
}


@ -0,0 +1,73 @@
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=arm64-apple-ios7 | FileCheck %s
// Memory ordering values.
enum {
memory_order_relaxed = 0,
memory_order_consume = 1,
memory_order_acquire = 2,
memory_order_release = 3,
memory_order_acq_rel = 4,
memory_order_seq_cst = 5
};
typedef struct { void *a, *b; } pointer_pair_t;
typedef struct { void *a, *b, *c, *d; } pointer_quad_t;
// rdar://13489679
extern _Atomic(_Bool) a_bool;
extern _Atomic(float) a_float;
extern _Atomic(void*) a_pointer;
extern _Atomic(pointer_pair_t) a_pointer_pair;
extern _Atomic(pointer_quad_t) a_pointer_quad;
// CHECK: define void @test0()
// CHECK: [[TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store i8 1, i8* [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = load i8* [[TEMP]], align 1
// CHECK-NEXT: store atomic i8 [[T0]], i8* @a_bool seq_cst, align 1
void test0() {
__c11_atomic_store(&a_bool, 1, memory_order_seq_cst);
}
// CHECK: define void @test1()
// CHECK: [[TEMP:%.*]] = alloca float, align 4
// CHECK-NEXT: store float 3.000000e+00, float* [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast float* [[TEMP]] to i32*
// CHECK-NEXT: [[T1:%.*]] = load i32* [[T0]], align 4
// CHECK-NEXT: store atomic i32 [[T1]], i32* bitcast (float* @a_float to i32*) seq_cst, align 4
void test1() {
__c11_atomic_store(&a_float, 3, memory_order_seq_cst);
}
// CHECK: define void @test2()
// CHECK: [[TEMP:%.*]] = alloca i8*, align 8
// CHECK-NEXT: store i8* @a_bool, i8** [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast i8** [[TEMP]] to i64*
// CHECK-NEXT: [[T1:%.*]] = load i64* [[T0]], align 8
// CHECK-NEXT: store atomic i64 [[T1]], i64* bitcast (i8** @a_pointer to i64*) seq_cst, align 8
void test2() {
__c11_atomic_store(&a_pointer, &a_bool, memory_order_seq_cst);
}
// CHECK: define void @test3(
// CHECK: [[PAIR:%.*]] = alloca [[PAIR_T:%.*]], align 8
// CHECK-NEXT: [[TEMP:%.*]] = alloca [[PAIR_T]], align 8
// CHECK: llvm.memcpy
// CHECK-NEXT: [[T0:%.*]] = bitcast [[PAIR_T]]* [[TEMP]] to i128*
// CHECK-NEXT: [[T1:%.*]] = load i128* [[T0]], align 16
// CHECK-NEXT: store atomic i128 [[T1]], i128* bitcast ([[PAIR_T]]* @a_pointer_pair to i128*) seq_cst, align 16
void test3(pointer_pair_t pair) {
__c11_atomic_store(&a_pointer_pair, pair, memory_order_seq_cst);
}
// CHECK: define void @test4([[QUAD_T:%.*]]*
// CHECK: [[TEMP:%.*]] = alloca [[QUAD_T:%.*]], align 8
// CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i8*
// CHECK-NEXT: [[T1:%.*]] = bitcast [[QUAD_T]]* {{%.*}} to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 32, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i8*
// CHECK-NEXT: call void @__atomic_store(i64 32, i8* bitcast ([[QUAD_T]]* @a_pointer_quad to i8*), i8* [[T0]], i32 5)
void test4(pointer_quad_t quad) {
__c11_atomic_store(&a_pointer_quad, quad, memory_order_seq_cst);
}


@ -1,13 +1,20 @@
// RUN: %clang_cc1 -fblocks -triple x86_64-apple-darwin9 %s -emit-llvm -o - | FileCheck %s -check-prefix=X64
// RUN: %clang_cc1 -fblocks -triple i686-apple-darwin9 %s -emit-llvm -o - | FileCheck %s -check-prefix=X32
// RUN: %clang_cc1 -fblocks -triple arm64-apple-darwin %s -emit-llvm -o - | FileCheck %s -check-prefix=ARM64
// X64: internal constant {{.*}} { i8** @_NSConcreteGlobalBlock, i32 1879048192
// X64: store i32 1610612736, i32* %want
// X64: store i32 1610612736, i32* %want
// X32: @_NSConcreteGlobalBlock, i32 1879048192, i32 0,
// X32: store i32 1610612736, i32* %want
// rdar://7677537
// ARM64: @_NSConcreteGlobalBlock, i32 1342177280, i32 0,
// ARM64: store i32 1610612736, i32* %want
// rdar://9757126
int printf(const char *, ...);
void *malloc(__SIZE_TYPE__ size);


@ -1,5 +1,6 @@
// REQUIRES: arm-registered-target
// RUN: %clang_cc1 -Wall -Werror -triple thumbv7-linux-gnueabi -fno-signed-char -O3 -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -Wall -Werror -triple arm64-apple-ios7.0 -O3 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-ARM64
// Make sure the canonical use works before going into smaller details:
int atomic_inc(int *addr) {
@ -12,46 +13,80 @@ int atomic_inc(int *addr) {
return OldVal;
}
// CHECK: @atomic_inc
// CHECK-LABEL: @atomic_inc
// CHECK: [[OLDVAL:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
// CHECK: [[INC:%.*]] = add nsw i32 [[OLDVAL]], 1
// CHECK: [[FAILURE:%.*]] = tail call i32 @llvm.arm.strex.p0i32(i32 [[INC]], i32* %addr)
// CHECK: [[TST:%.*]] = icmp eq i32 [[FAILURE]], 0
// CHECK: br i1 [[TST]], label {{%[a-zA-Z0-9.]+}}, label {{%[a-zA-Z0-9.]+}}
// CHECK-ARM64-LABEL: @atomic_inc
// CHECK-ARM64: [[OLDVAL:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i32(i32* %addr)
// CHECK-ARM64: [[INC:%.*]] = add i64 [[OLDVAL]], 1
// CHECK-ARM64: [[TRUNC:%.*]] = and i64 [[INC]], 4294967295
// CHECK-ARM64: [[FAILURE:%.*]] = tail call i32 @llvm.arm64.stxr.p0i32(i64 [[TRUNC]], i32* %addr)
// CHECK-ARM64: [[TST:%.*]] = icmp eq i32 [[FAILURE]], 0
// CHECK-ARM64: br i1 [[TST]], label {{%[a-zA-Z0-9.]+}}, label {{%[a-zA-Z0-9.]+}}
struct Simple {
char a, b;
};
int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: @test_ldrex
// CHECK-LABEL: @test_ldrex
// CHECK-ARM64-LABEL: @test_ldrex
int sum = 0;
sum += __builtin_arm_ldrex(addr);
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
// CHECK: and i32 [[INTRES]], 255
// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i8(i8* %addr)
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
// CHECK-ARM64: [[SEXTTMP:%.*]] = shl i32 [[TRUNCRES]], 24
// CHECK-ARM64: ashr exact i32 [[SEXTTMP]], 24
sum += __builtin_arm_ldrex((short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i16(i16* [[ADDR16]])
// CHECK: [[TMPSEXT:%.*]] = shl i32 [[INTRES]], 16
// CHECK: ashr exact i32 [[TMPSEXT]], 16
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i16(i16* [[ADDR16]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
// CHECK-ARM64: [[TMPSEXT:%.*]] = shl i32 [[TRUNCRES]], 16
// CHECK-ARM64: ashr exact i32 [[TMPSEXT]], 16
sum += __builtin_arm_ldrex((int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i32(i32* [[ADDR32]])
// CHECK-ARM64: trunc i64 [[INTRES]] to i32
sum += __builtin_arm_ldrex((long long *)addr);
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
// CHECK-ARM64: call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
sum += __builtin_arm_ldrex(addr64);
// CHECK: [[ADDR64_AS8:%.*]] = bitcast i64* %addr64 to i8*
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* [[ADDR64_AS8]])
// CHECK-ARM64: call i64 @llvm.arm64.ldxr.p0i64(i64* %addr64)
sum += __builtin_arm_ldrex(addrfloat);
// CHECK: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[INTADDR]])
// CHECK: bitcast i32 [[INTRES]] to float
// CHECK-ARM64: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i32(i32* [[INTADDR]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
// CHECK-ARM64: bitcast i32 [[TRUNCRES]] to float
sum += __builtin_arm_ldrex((double *)addr);
// CHECK: [[STRUCTRES:%.*]] = tail call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
// CHECK: [[RESHI:%.*]] = extractvalue { i32, i32 } [[STRUCTRES]], 1
@ -62,51 +97,110 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: [[INTRES:%.*]] = or i64 [[RESHIHI]], [[RESLO64]]
// CHECK: bitcast i64 [[INTRES]] to double
// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
// CHECK-ARM64: bitcast i64 [[INTRES]] to double
sum += *__builtin_arm_ldrex((int **)addr);
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
// CHECK: inttoptr i32 [[INTRES]] to i32*
// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to i32*
sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
// CHECK: inttoptr i32 [[INTRES]] to %struct.Simple*
// CHECK-ARM64: [[INTRES:%.*]] = tail call i64 @llvm.arm64.ldxr.p0i64(i64* [[ADDR64]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to %struct.Simple*
return sum;
}
int test_strex(char *addr) {
// CHECK: @test_strex
// CHECK-LABEL: @test_strex
// CHECK-ARM64-LABEL: @test_strex
int res = 0;
struct Simple var = {0};
res |= __builtin_arm_strex(4, addr);
// CHECK: call i32 @llvm.arm.strex.p0i8(i32 4, i8* %addr)
// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i8(i64 4, i8* %addr)
res |= __builtin_arm_strex(42, (short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: call i32 @llvm.arm.strex.p0i16(i32 42, i16* [[ADDR16]])
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i16(i64 42, i16* [[ADDR16]])
res |= __builtin_arm_strex(42, (int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 42, i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i32(i64 42, i32* [[ADDR32]])
res |= __builtin_arm_strex(42, (long long *)addr);
// CHECK: call i32 @llvm.arm.strexd(i32 42, i32 0, i8* %addr)
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i64(i64 42, i64* [[ADDR64]])
res |= __builtin_arm_strex(2.71828f, (float *)addr);
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 1076754509, i32* [[ADDR32]])
// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i32(i64 1076754509, i32* [[ADDR32]])
res |= __builtin_arm_strex(3.14159, (double *)addr);
// CHECK: call i32 @llvm.arm.strexd(i32 -266631570, i32 1074340345, i8* %addr)
// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i64(i64 4614256650576692846, i64* [[ADDR64]])
res |= __builtin_arm_strex(&var, (struct Simple **)addr);
// CHECK: [[INTVAL:%.*]] = ptrtoint i16* %var to i32
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 [[INTVAL]], i32* [[ADDR32]])
// CHECK-ARM64: [[INTVAL:%.*]] = ptrtoint i16* %var to i64
// CHECK-ARM64: call i32 @llvm.arm64.stxr.p0i64(i64 [[INTVAL]], i64* [[ADDR64]])
return res;
}
void test_clrex() {
// CHECK: @test_clrex
// CHECK-LABEL: @test_clrex
// CHECK-ARM64-LABEL: @test_clrex
__builtin_arm_clrex();
// CHECK: call void @llvm.arm.clrex()
// CHECK-ARM64: call void @llvm.arm64.clrex()
}
#ifdef __aarch64__
// 128-bit tests
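// These are expected to lower to the paired-exclusive ldxp/stxp operations, as the CHECK lines below verify.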
__int128 test_ldrex_128(__int128 *addr) {
// CHECK-ARM64-LABEL: @test_ldrex_128
return __builtin_arm_ldrex(addr);
// CHECK-ARM64: [[ADDR8:%.*]] = bitcast i128* %addr to i8*
// CHECK-ARM64: [[STRUCTRES:%.*]] = tail call { i64, i64 } @llvm.arm64.ldxp(i8* [[ADDR8]])
// CHECK-ARM64: [[RESHI:%.*]] = extractvalue { i64, i64 } [[STRUCTRES]], 1
// CHECK-ARM64: [[RESLO:%.*]] = extractvalue { i64, i64 } [[STRUCTRES]], 0
// CHECK-ARM64: [[RESHI64:%.*]] = zext i64 [[RESHI]] to i128
// CHECK-ARM64: [[RESLO64:%.*]] = zext i64 [[RESLO]] to i128
// CHECK-ARM64: [[RESHIHI:%.*]] = shl nuw i128 [[RESHI64]], 64
// CHECK-ARM64: [[INTRES:%.*]] = or i128 [[RESHIHI]], [[RESLO64]]
// CHECK-ARM64: ret i128 [[INTRES]]
}
int test_strex_128(__int128 *addr, __int128 val) {
// CHECK-ARM64-LABEL: @test_strex_128
return __builtin_arm_strex(val, addr);
// CHECK-ARM64: [[VALLO:%.*]] = trunc i128 %val to i64
// CHECK-ARM64: [[VALHI128:%.*]] = lshr i128 %val, 64
// CHECK-ARM64: [[VALHI:%.*]] = trunc i128 [[VALHI128]] to i64
// CHECK-ARM64: [[ADDR8:%.*]] = bitcast i128* %addr to i8*
// CHECK-ARM64: [[RES:%.*]] = tail call i32 @llvm.arm64.stxp(i64 [[VALLO]], i64 [[VALHI]], i8* [[ADDR8]])
}
#endif


@ -0,0 +1,6 @@
// RUN: %clang_cc1 -triple arm64-apple-ios -O3 -emit-llvm -o - %s | FileCheck %s
void f0(void *a, void *b) {
__clear_cache(a,b);
// CHECK: call {{.*}} @__clear_cache
}


@ -0,0 +1,19 @@
// RUN: %clang_cc1 %s -triple=arm64-apple-ios7.0.0 -emit-llvm -o - | FileCheck %s
// rdar://12162905
struct S {
S();
int iField;
};
S::S() {
iField = 1;
};
// CHECK: %struct.S* @_ZN1SC2Ev(%struct.S* returned %this)
// CHECK: %struct.S* @_ZN1SC1Ev(%struct.S* returned %this)
// CHECK: [[THISADDR:%[a-zA-Z0-9.]+]] = alloca %struct.S*
// CHECK: store %struct.S* %this, %struct.S** [[THISADDR]]
// CHECK: [[THIS1:%.*]] = load %struct.S** [[THISADDR]]
// CHECK: ret %struct.S* [[THIS1]]


@ -0,0 +1,15 @@
// RUN: %clang_cc1 -triple arm64-linux-gnu -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-linux-gnu -emit-llvm -o - %s -target-abi darwinpcs | FileCheck %s --check-prefix=CHECK-DARWIN
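// darwinpcs requires small integer arguments to be extended by the caller (the zeroext/signext below); the standard AAPCS leaves them unextended.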
void test_extensions(bool a, char b, short c) {}
// CHECK: define void @_Z15test_extensionsbcs(i1 %a, i8 %b, i16 %c)
// CHECK-DARWIN: define void @_Z15test_extensionsbcs(i1 zeroext %a, i8 signext %b, i16 signext %c)
struct Empty {};
void test_empty(Empty e) {}
// CHECK: define void @_Z10test_empty5Empty(i8
// CHECK-DARWIN: define void @_Z10test_empty5Empty()
struct HFA {
float a[3];
};


@ -0,0 +1,27 @@
// RUN: %clang_cc1 -triple arm64-apple-ios -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s
struct Empty {};
Empty emptyvar;
int take_args(int a, ...) {
__builtin_va_list l;
__builtin_va_start(l, a);
// CHECK: call void @llvm.va_start
emptyvar = __builtin_va_arg(l, Empty);
// CHECK: load i8**
// CHECK-NOT: getelementptr
// CHECK: [[EMPTY_PTR:%[a-zA-Z0-9._]+]] = bitcast i8* {{%[a-zA-Z0-9._]+}} to %struct.Empty*
// It's conceivable that EMPTY_PTR may not actually be a valid pointer
// (e.g. it's at the very bottom of the stack and the next page is
// invalid). This doesn't matter provided it's never loaded (there's no
// well-defined way to tell), but it becomes a problem if we do try to use it.
// CHECK-NOT: load %struct.Empty* [[EMPTY_PTR]]
int i = __builtin_va_arg(l, int);
// CHECK: va_arg i8** {{%[a-zA-Z0-9._]+}}, i32
__builtin_va_end(l);
return i;
}


@ -0,0 +1,88 @@
// RUN: %clang_cc1 %s -triple=arm64-apple-ios -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 %s -triple=arm64-apple-ios -emit-llvm -o - | FileCheck -check-prefix=CHECK-GLOBALS %s
// __cxa_guard_acquire argument is 64-bit
// rdar://11540122
struct A {
A();
};
void f() {
// CHECK: call i32 @__cxa_guard_acquire(i64*
static A a;
}
// ARM64 uses the C++11 definition of POD.
// rdar://12650514
namespace test1 {
// This class is POD in C++11 and cannot have objects allocated in
// its tail-padding.
struct ABase {};
struct A : ABase {
int x;
char c;
};
struct B : A {
char d;
};
int test() {
return sizeof(B);
}
// CHECK: define i32 @_ZN5test14testEv()
// CHECK: ret i32 12
}
namespace std {
class type_info;
}
// ARM64 uses string comparisons for what would otherwise be
// default-visibility weak RTTI. rdar://12650568
namespace test2 {
struct A {
virtual void foo();
};
void A::foo() {}
// Tested below because these globals get kind of oddly rearranged.
struct __attribute__((visibility("hidden"))) B {};
const std::type_info &b0 = typeid(B);
// CHECK-GLOBALS: @_ZTSN5test21BE = linkonce_odr hidden constant
// CHECK-GLOBALS: @_ZTIN5test21BE = linkonce_odr hidden constant { {{.*}}, i8* getelementptr inbounds ([11 x i8]* @_ZTSN5test21BE, i32 0, i32 0) }
const std::type_info &b1 = typeid(B*);
// CHECK-GLOBALS: @_ZTSPN5test21BE = linkonce_odr hidden constant
// CHECK-GLOBALS: @_ZTIPN5test21BE = linkonce_odr hidden constant { {{.*}}, i8* getelementptr inbounds ([12 x i8]* @_ZTSPN5test21BE, i32 0, i32 0), i32 0, i8* bitcast
struct C {};
const std::type_info &c0 = typeid(C);
// CHECK-GLOBALS: @_ZTSN5test21CE = linkonce_odr hidden constant
// CHECK-GLOBALS: @_ZTIN5test21CE = linkonce_odr hidden constant { {{.*}}, i8* inttoptr (i64 add (i64 ptrtoint ([11 x i8]* @_ZTSN5test21CE to i64), i64 -9223372036854775808) to i8*) }
const std::type_info &c1 = typeid(C*);
// CHECK-GLOBALS: @_ZTSPN5test21CE = linkonce_odr hidden constant
// CHECK-GLOBALS: @_ZTIPN5test21CE = linkonce_odr hidden constant { {{.*}}, i8* inttoptr (i64 add (i64 ptrtoint ([12 x i8]* @_ZTSPN5test21CE to i64), i64 -9223372036854775808) to i8*), i32 0, i8* bitcast
// This class is explicitly-instantiated, but that instantiation
// doesn't guarantee to emit RTTI, so we can still demote the visibility.
template <class T> class D {};
template class D<int>;
const std::type_info &d0 = typeid(D<int>);
// CHECK-GLOBALS: @_ZTSN5test21DIiEE = linkonce_odr hidden constant
// CHECK-GLOBALS: @_ZTIN5test21DIiEE = linkonce_odr hidden constant { {{.*}}, i8* inttoptr (i64 add (i64 ptrtoint ([14 x i8]* @_ZTSN5test21DIiEE to i64), i64 -9223372036854775808) to i8*) }
// This class is explicitly-instantiated and *does* guarantee to
// emit RTTI, so we're stuck with having to use default visibility.
template <class T> class E {
virtual void foo() {}
};
template class E<int>;
// CHECK-GLOBALS: @_ZTSN5test21EIiEE = weak_odr constant [14 x i8]
// CHECK-GLOBALS: @_ZTIN5test21EIiEE = weak_odr constant { {{.*}}, i8* inttoptr (i64 add (i64 ptrtoint ([14 x i8]* @_ZTSN5test21EIiEE to i64), i64 -9223372036854775808) to i8*) }
// CHECK-GLOBALS: @_ZTSN5test21AE = constant [11 x i8]
// CHECK-GLOBALS: @_ZTIN5test21AE = constant { {{.*}}, i8* getelementptr inbounds ([11 x i8]* @_ZTSN5test21AE, i32 0, i32 0) }
}


@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple armv7-apple-ios -x c++ -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-apple-ios -x c++ -emit-llvm -o - %s | FileCheck %s
// According to the Itanium ABI (3.1.1), types with non-trivial copy
// constructors passed by value should be passed indirectly, with the caller


@ -1,10 +1,18 @@
// RUN: %clang_cc1 -triple arm-none-linux-gnueabi -target-feature +neon %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple armv7-apple-ios -target-feature +neon %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple arm64-apple-ios %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple arm64-linux-gnu %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK-AARCH64
typedef float float32_t;
typedef double float64_t;
typedef __fp16 float16_t;
#if defined(__aarch64__)
typedef unsigned char poly8_t;
typedef unsigned short poly16_t;
#else
typedef signed char poly8_t;
typedef short poly16_t;
typedef unsigned long long uint64_t;
#endif
typedef unsigned __INT64_TYPE__ uint64_t;
typedef __attribute__((neon_vector_type(2))) int int32x2_t;
typedef __attribute__((neon_vector_type(4))) int int32x4_t;
@ -14,26 +22,53 @@ typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
#ifdef __aarch64__
typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
#endif
typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
// CHECK: 16__simd64_int32_t
// CHECK-AARCH64: 11__Int32x2_t
void f1(int32x2_t v) { }
// CHECK: 17__simd128_int32_t
// CHECK-AARCH64: 11__Int32x4_t
void f2(int32x4_t v) { }
// CHECK: 17__simd64_uint64_t
// CHECK-AARCH64: 12__Uint64x1_t
void f3(uint64x1_t v) { }
// CHECK: 18__simd128_uint64_t
// CHECK-AARCH64: 12__Uint64x2_t
void f4(uint64x2_t v) { }
// CHECK: 18__simd64_float32_t
// CHECK-AARCH64: 13__Float32x2_t
void f5(float32x2_t v) { }
// CHECK: 19__simd128_float32_t
// CHECK-AARCH64: 13__Float32x4_t
void f6(float32x4_t v) { }
// CHECK: 18__simd64_float16_t
// CHECK-AARCH64: 13__Float16x4_t
void f7(float16x4_t v) {}
// CHECK: 19__simd128_float16_t
// CHECK-AARCH64: 13__Float16x8_t
void f8(float16x8_t v) {}
// CHECK: 17__simd128_poly8_t
// CHECK-AARCH64: 12__Poly8x16_t
void f9(poly8x16_t v) {}
// CHECK: 18__simd128_poly16_t
// CHECK-AARCH64: 12__Poly16x8_t
void f10(poly16x8_t v) {}
#ifdef __aarch64__
// CHECK-AARCH64: 13__Float64x2_t
void f11(float64x2_t v) { }
#endif

View File

@ -0,0 +1,20 @@
// RUN: %clang_cc1 -triple arm64-apple-ios -ffreestanding -S -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-UNSIGNED-POLY %s
// RUN: %clang_cc1 -triple arm64-linux-gnu -ffreestanding -S -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-UNSIGNED-POLY %s
// RUN: %clang_cc1 -triple armv7-apple-ios -ffreestanding -target-cpu cortex-a8 -S -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-SIGNED-POLY %s
#include <arm_neon.h>
// Polynomial types really should be universally unsigned, otherwise casting
// (say) poly8_t "x^7" to poly16_t would change it to "x^15 + x^14 + ... +
// x^7". Unfortunately 32-bit ARM ended up in a slightly delicate ABI situation
// so for now it got that wrong.
poly16_t test_poly8(poly8_t pIn) {
// CHECK-UNSIGNED-POLY: @_Z10test_poly8h
// CHECK-UNSIGNED-POLY: zext i8 {{.*}} to i16
// CHECK-SIGNED-POLY: @_Z10test_poly8a
// CHECK-SIGNED-POLY: sext i8 {{.*}} to i16
return pIn;
}

View File

@ -1,19 +1,22 @@
// RUN: %clang_cc1 -triple armv7-apple-darwin10 -emit-llvm -fblocks -fobjc-arc -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-apple-ios -emit-llvm -fblocks -fobjc-arc -o - %s | FileCheck %s
// <rdar://12438598>: use an autorelease marker on ARM64.
id test0(void) {
extern id test0_helper(void);
// CHECK: [[T0:%.*]] = call arm_aapcscc i8* @test0_helper()
// CHECK: [[T0:%.*]] = call [[CC:(arm_aapcscc )?]]i8* @test0_helper()
// CHECK-NEXT: ret i8* [[T0]]
return test0_helper();
}
void test1(void) {
extern id test1_helper(void);
// CHECK: [[T0:%.*]] = call arm_aapcscc i8* @test1_helper()
// CHECK-NEXT: call void asm sideeffect "mov\09r7, r7
// CHECK-NEXT: [[T1:%.*]] = call arm_aapcscc i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]])
// CHECK: [[T0:%.*]] = call [[CC]]i8* @test1_helper()
// CHECK-NEXT: call void asm sideeffect "mov
// CHECK-NEXT: [[T1:%.*]] = call [[CC]]i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]])
// CHECK-NEXT: store i8* [[T1]],
// CHECK-NEXT: call arm_aapcscc void @objc_storeStrong(
// CHECK-NEXT: call [[CC]]void @objc_storeStrong(
// CHECK-NEXT: ret void
id x = test1_helper();
}
@ -22,14 +25,14 @@ void test1(void) {
@class A;
A *test2(void) {
extern A *test2_helper(void);
// CHECK: [[T0:%.*]] = call arm_aapcscc [[A:%.*]]* @test2_helper()
// CHECK: [[T0:%.*]] = call [[CC]][[A:%.*]]* @test2_helper()
// CHECK-NEXT: ret [[A]]* [[T0]]
return test2_helper();
}
id test3(void) {
extern A *test3_helper(void);
// CHECK: [[T0:%.*]] = call arm_aapcscc [[A:%.*]]* @test3_helper()
// CHECK: [[T0:%.*]] = call [[CC]][[A:%.*]]* @test3_helper()
// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
// CHECK-NEXT: ret i8* [[T1]]
return test3_helper();


@ -0,0 +1,17 @@
// RUN: %clang_cc1 -triple arm64-apple-ios -emit-llvm -o - %s | FileCheck %s
// rdar://12617764
// CHECK: @"OBJC_IVAR_$_I.IVAR2" = global i32 8
// CHECK: @"OBJC_IVAR_$_I.IVAR1" = global i32 0
@interface I
{
id IVAR1;
id IVAR2;
}
@end
@implementation I
// CHECK: [[IVAR:%.*]] = load i32* @"OBJC_IVAR_$_I.IVAR2"
// CHECK: [[CONV:%.*]] = sext i32 [[IVAR]] to i64
- (id) METH { return IVAR2; }
@end


@ -0,0 +1,20 @@
// RUN: %clang_cc1 -fblocks -triple arm64-apple-darwin %s -emit-llvm -o - | FileCheck %s -check-prefix=CHECK-ARM64
// rdar://12416433
struct stret { int x[100]; };
struct stret zero;
struct stret one = {{1}};
@interface Test @end
@implementation Test
+(struct stret) method { return one; }
@end
int main(int argc, const char **argv)
{
struct stret st2 = one;
if (argc) st2 = [(id)(argc&~255) method];
}
// CHECK-ARM64: call void @llvm.memset.p0i8.i64(i8* [[T0:%.*]], i8 0, i64 400, i32 4, i1 false)


@ -0,0 +1,22 @@
// RUN: %clang_cc1 -fblocks -triple x86_64-apple-darwin9 %s -emit-llvm -o - | FileCheck %s -check-prefix=X86
// RUN: %clang_cc1 -fblocks -triple arm-apple-darwin %s -emit-llvm -o - | FileCheck %s -check-prefix=ARM
// RUN: %clang_cc1 -fblocks -triple arm64-apple-darwin %s -emit-llvm -o - | FileCheck %s -check-prefix=ARM64
// <rdar://problem/9757015>: Don't use 'stret' variants on ARM64.
// X86: @main
// X86: @objc_msgSend_stret
// ARM: @main
// ARM: @objc_msgSend_stret
// ARM64: @main
// ARM64-NOT: @objc_msgSend_stret
struct st { int i[1000]; };
@interface Test
+(struct st)method;
@end
int main() {
[Test method];
}


@ -0,0 +1,3 @@
// RUN: %clang -target arm64-apple-ios7.0 -### %s 2>&1 | FileCheck %s
// CHECK: "-target-abi" "darwinpcs"


@ -142,11 +142,24 @@
// RUN: FileCheck -check-prefix=LINK_NO_IOS_CRT1 %s < %t.log
// LINK_NO_IOS_CRT1-NOT: crt
// RUN: %clang -target arm64-apple-ios5.0 -miphoneos-version-min=5.0 -### %t.o 2> %t.log
// RUN: FileCheck -check-prefix=LINK_NO_IOS_ARM64_CRT1 %s < %t.log
// LINK_NO_IOS_ARM64_CRT1-NOT: crt
// RUN: %clang -target i386-apple-darwin12 -pg -### %t.o 2> %t.log
// RUN: FileCheck -check-prefix=LINK_PG %s < %t.log
// LINK_PG: -lgcrt1.o
// LINK_PG: -no_new_main
// Check that clang links with libgcc_s.1 for iOS 4 and earlier, but not arm64.
// RUN: %clang -target armv7-apple-ios4.0 -miphoneos-version-min=4.0 -### %t.o 2> %t.log
// RUN: FileCheck -check-prefix=LINK_IOS_LIBGCC_S %s < %t.log
// LINK_IOS_LIBGCC_S: lgcc_s.1
// RUN: %clang -target arm64-apple-ios4.0 -miphoneos-version-min=4.0 -### %t.o 2> %t.log
// RUN: FileCheck -check-prefix=LINK_NO_IOS_ARM64_LIBGCC_S %s < %t.log
// LINK_NO_IOS_ARM64_LIBGCC_S-NOT: lgcc_s.1
// RUN: %clang -target x86_64-apple-darwin12 -rdynamic -### %t.o \
// RUN: -mlinker-version=100 2> %t.log
// RUN: FileCheck -check-prefix=LINK_NO_EXPORT_DYNAMIC %s < %t.log


@ -0,0 +1,9 @@
// RUN: %clang -target x86_64-apple-darwin -mios-simulator-version-min=7 -fsyntax-only %s -Xclang -verify
// RUN: %clang -target x86_64-apple-darwin -arch arm64 -mios-version-min=7 -fsyntax-only %s -Xclang -verify
// For 64-bit iOS, automatically promote -Wimplicit-function-declaration
// to an error.
void radar_10894044() {
radar_10894044_not_declared(); // expected-error {{implicit declaration of function 'radar_10894044_not_declared' is invalid in C99}}
}


@ -6,7 +6,9 @@
// RUN: %clang -target armv7-apple-ios -### %t.o 2>> %t.log
// RUN: %clang -target armv7-apple-ios0.0 -### %t.o 2>> %t.log
// RUN: %clang -target armv7-apple-ios1.2.3 -### %t.o 2>> %t.log
// RUN: %clang -target armv7-apple-ios5.0 -### %t.o 2>> %t.log
// RUN: %clang -target armv7-apple-ios7.0 -### %t.o 2>> %t.log
// RUN: %clang -target arm64-apple-ios -### %t.o 2>> %t.log
//
// RUN: FileCheck %s < %t.log
@ -30,4 +32,10 @@
// CHECK: 1.2.3
// CHECK: {{ld(.exe)?"}}
// CHECK: -iphoneos_version_min
// CHECK: 5.0.0
// CHECK: {{ld(.exe)?"}}
// CHECK: -iphoneos_version_min
// CHECK: 7.0.0
// CHECK: {{ld(.exe)?"}}
// CHECK: -iphoneos_version_min
// CHECK: 7.0.0


@ -0,0 +1,8 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7.1 -fsyntax-only -verify %s
void foo() {
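// The 'z' constraint is expected to map a constant-zero operand to the zero register (wzr/xzr).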
asm volatile("USE(%0)" :: "z"(0LL));
asm volatile("USE(%x0)" :: "z"(0LL));
asm volatile("USE(%w0)" :: "z"(0));
asm volatile("USE(%0)" :: "z"(0)); // expected-warning {{value size does not match register size specified by the constraint and modifier}}
}


@ -0,0 +1,13 @@
// RUN: %clang_cc1 -triple arm64-apple-darwin -fsyntax-only -ffreestanding -verify %s
#include <arm_neon.h>
// rdar://13527900
void vcopy_reject(float32x4_t vOut0, float32x4_t vAlpha, int t) {
vcopyq_laneq_f32(vOut0, 1, vAlpha, t); // expected-error {{argument to '__builtin_neon_vgetq_lane_f32' must be a constant integer}} expected-error {{initializing 'float32_t' (aka 'float') with an expression of incompatible type 'void'}}
}
// rdar://problem/15256199
float32x4_t test_vmlsq_lane(float32x4_t accum, float32x4_t lhs, float32x2_t rhs) {
return vmlsq_lane_f32(accum, lhs, rhs, 1);
}


@ -0,0 +1,59 @@
// RUN: %clang_cc1 -triple arm64-apple-ios7.0 -fsyntax-only -verify %s
struct Simple {
char a, b;
};
int test_ldrex(char *addr) {
int sum = 0;
sum += __builtin_arm_ldrex(addr);
sum += __builtin_arm_ldrex((short *)addr);
sum += __builtin_arm_ldrex((int *)addr);
sum += __builtin_arm_ldrex((long long *)addr);
sum += __builtin_arm_ldrex((__int128 *)addr);
sum += __builtin_arm_ldrex((float *)addr);
sum += __builtin_arm_ldrex((double *)addr);
sum += *__builtin_arm_ldrex((int **)addr);
sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
sum += __builtin_arm_ldrex((volatile char *)addr);
sum += __builtin_arm_ldrex((const volatile char *)addr);
// In principle this might be valid, but stick to ints and floats for scalar
// types at the moment.
sum += __builtin_arm_ldrex((struct Simple *)addr).a; // expected-error {{address argument to atomic builtin must be a pointer to}}
__builtin_arm_ldrex(); // expected-error {{too few arguments to function call}}
__builtin_arm_ldrex(1, 2); // expected-error {{too many arguments to function call}}
return sum;
}
int test_strex(char *addr) {
int res = 0;
struct Simple var = {0};
res |= __builtin_arm_strex(4, addr);
res |= __builtin_arm_strex(42, (short *)addr);
res |= __builtin_arm_strex(42, (int *)addr);
res |= __builtin_arm_strex(42, (long long *)addr);
res |= __builtin_arm_strex(42, (__int128 *)addr);
res |= __builtin_arm_strex(2.71828f, (float *)addr);
res |= __builtin_arm_strex(3.14159, (double *)addr);
res |= __builtin_arm_strex(&var, (struct Simple **)addr);
res |= __builtin_arm_strex(42, (volatile char *)addr);
res |= __builtin_arm_strex(42, (char *const)addr);
res |= __builtin_arm_strex(42, (const char *)addr); // expected-warning {{passing 'const char *' to parameter of type 'volatile char *' discards qualifiers}}
res |= __builtin_arm_strex(var, (struct Simple *)addr); // expected-error {{address argument to atomic builtin must be a pointer to}}
res |= __builtin_arm_strex(var, (struct Simple **)addr); // expected-error {{passing 'struct Simple' to parameter of incompatible type 'struct Simple *'}}
res |= __builtin_arm_strex(&var, (struct Simple **)addr).a; // expected-error {{is not a structure or union}}
__builtin_arm_strex(1); // expected-error {{too few arguments to function call}}
__builtin_arm_strex(1, 2, 3); // expected-error {{too many arguments to function call}}
return res;
}
void test_clrex() {
__builtin_arm_clrex();
__builtin_arm_clrex(1); // expected-error {{too many arguments to function call}}
}


@ -0,0 +1,18 @@
// RUN: %clang_cc1 -triple arm64-apple-ios -fsyntax-only -verify %s
// RUN: %clang_cc1 -triple arm64-apple-ios -DTEST1 -fsyntax-only -verify %s
#ifdef TEST1
void __clear_cache(void *start, void *end);
#endif
void test_clear_cache_chars(char *start, char *end) {
__clear_cache(start, end);
}
void test_clear_cache_voids(void *start, void *end) {
__clear_cache(start, end);
}
void test_clear_cache_no_args() {
__clear_cache(); // expected-error {{too few arguments to function call}}
}


@ -0,0 +1,8 @@
// RUN: %clang_cc1 -triple arm64-apple-macosx10.8.0 -fsyntax-only -verify %s
unsigned t, r, *p;
int foo (void) {
__asm__ __volatile__( "stxr %w[_t], %[_r], [%[_p]]" : [_t] "=&r" (t) : [_p] "p" (p), [_r] "r" (r) : "memory"); // expected-warning {{value size does not match register size specified by the constraint and modifier}}
return 1;
}


@ -4,7 +4,7 @@
typedef float float32_t;
typedef signed char poly8_t;
typedef short poly16_t;
typedef unsigned long long uint64_t;
typedef unsigned __INT64_TYPE__ uint64_t;
// Define some valid Neon types.
typedef __attribute__((neon_vector_type(2))) int int32x2_t;
@ -23,7 +23,6 @@ typedef __attribute__((neon_vector_type(2, 4))) int only_one_arg; // expected-er
typedef __attribute__((neon_vector_type(2.0))) int non_int_width; // expected-error{{'neon_vector_type' attribute requires an integer constant}}
// Only certain element types are allowed.
typedef __attribute__((neon_vector_type(2))) double double_elt; // expected-error{{invalid vector element type}}
typedef __attribute__((neon_vector_type(4))) void* ptr_elt; // expected-error{{invalid vector element type}}
typedef __attribute__((neon_polyvector_type(4))) float32_t bad_poly_elt; // expected-error{{invalid vector element type}}
struct aggr { signed char c; };

Some files were not shown because too many files have changed in this diff.