[SystemZ] Add support for z13 and its vector facility

This patch adds support for the z13 architecture type.  For compatibility
with GCC, a pair of options -mvx / -mno-vx can be used to selectively
enable/disable use of the vector facility.

When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences (illustrated by the C sketch after this list):
- Vector types are passed/returned in vector registers
  (except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
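
A minimal C sketch of these rules (the function names are hypothetical;
the behavior described in the comments assumes a compiler with this patch
and the vector facility enabled via -march=z13 or -mvx):

  #include <stdarg.h>

  typedef __attribute__((vector_size(16))) int v4i32;

  /* Named vector argument and return value: passed and returned in
     vector registers under the vector ABI (indirectly through memory
     under the old ABI).                                               */
  v4i32 double_elems(v4i32 arg) { return arg + arg; }

  /* Unnamed arguments of a variadic function are the exception: even
     with the vector facility, a v4i32 passed through '...' is passed
     in memory, and va_arg reads it back from the overflow area of the
     va_list.                                                          */
  v4i32 first_vec(int count, ...) {
    va_list ap;
    va_start(ap, count);
    v4i32 v = va_arg(ap, v4i32);
    va_end(ap);
    return v;
  }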

The reason for choosing 8-byte vector alignment is that the hardware can
load vectors efficiently at 8-byte alignment, while the ABI only guarantees
8-byte alignment of the stack pointer; requiring any higher alignment for
vectors would therefore force dynamic stack re-alignment code.

However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
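
A quick way to observe the difference (a minimal check mirroring the
__alignof__ (v16i8) global in the updated ABI test below; the expected
values assume the rules described above):

  #include <stdio.h>

  typedef __attribute__((vector_size(16))) char v16i8;

  int main(void) {
    /* Expected: 8 with the vector facility (-march=z13 or -mvx),
       16 (natural alignment) without it.                          */
    printf("alignof(v16i8) = %zu\n", _Alignof(v16i8));
    return 0;
  }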

These alignment rules are not only implemented at the C language level,
but also at the LLVM IR level.  This is done by selecting a different
DataLayout string depending on whether the vector ABI is in effect or not.
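
Concretely, the two DataLayout strings (both quoted verbatim in the hunks
and tests below) differ only in the v128:64 component, which gives 128-bit
vectors 64-bit (8-byte) alignment:

  E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64           (default ABI)
  E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64   (vector ABI)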

Based on a patch by Richard Sandiford.

llvm-svn: 236531
Ulrich Weigand 2015-05-05 19:35:52 +00:00
parent 9958c489bb
commit 66ff51b4ea
8 changed files with 475 additions and 29 deletions


@@ -1310,6 +1310,9 @@ def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>, Flags<[CC1Option]>
def maltivec : Flag<["-"], "maltivec">, Alias<faltivec>;
def mno_altivec : Flag<["-"], "mno-altivec">, Alias<fno_altivec>;
def mvx : Flag<["-"], "mvx">, Group<m_Group>;
def mno_vx : Flag<["-"], "mno-vx">, Group<m_Group>;
def mno_warn_nonportable_cfstrings : Flag<["-"], "mno-warn-nonportable-cfstrings">, Group<m_Group>;
def mno_omit_leaf_frame_pointer : Flag<["-"], "mno-omit-leaf-frame-pointer">, Group<m_Group>;
def momit_leaf_frame_pointer : Flag<["-"], "momit-leaf-frame-pointer">, Group<m_Group>,


@@ -5531,10 +5531,11 @@ class SystemZTargetInfo : public TargetInfo {
static const char *const GCCRegNames[];
std::string CPU;
bool HasTransactionalExecution;
bool HasVector;
public:
SystemZTargetInfo(const llvm::Triple &Triple)
: TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false) {
: TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false), HasVector(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
TLSSupported = true;
@@ -5587,6 +5588,7 @@ public:
.Case("z10", true)
.Case("z196", true)
.Case("zEC12", true)
.Case("z13", true)
.Default(false);
return CPUKnown;
@@ -5594,6 +5596,10 @@ public:
void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
if (CPU == "zEC12")
Features["transactional-execution"] = true;
if (CPU == "z13") {
Features["transactional-execution"] = true;
Features["vector"] = true;
}
}
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -5602,6 +5608,14 @@ public:
for (unsigned i = 0, e = Features.size(); i != e; ++i) {
if (Features[i] == "+transactional-execution")
HasTransactionalExecution = true;
if (Features[i] == "+vector")
HasVector = true;
}
// If we use the vector ABI, vector types are 64-bit aligned.
if (HasVector) {
MaxVectorAlign = 64;
DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
"-v128:64-a:8:16-n32:64";
}
return true;
}
@@ -5610,8 +5624,15 @@ public:
return llvm::StringSwitch<bool>(Feature)
.Case("systemz", true)
.Case("htm", HasTransactionalExecution)
.Case("vx", HasVector)
.Default(false);
}
StringRef getABI() const override {
if (HasVector)
return "vector";
return "";
}
};
const Builtin::Info SystemZTargetInfo::BuiltinInfo[] = {


@@ -5146,12 +5146,17 @@ void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
namespace {
class SystemZABIInfo : public ABIInfo {
bool HasVector;
public:
SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
SystemZABIInfo(CodeGenTypes &CGT, bool HV)
: ABIInfo(CGT), HasVector(HV) {}
bool isPromotableIntegerType(QualType Ty) const;
bool isCompoundType(QualType Ty) const;
bool isVectorArgumentType(QualType Ty) const;
bool isFPArgumentType(QualType Ty) const;
QualType GetSingleElementType(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType ArgTy) const;
@@ -5169,8 +5174,8 @@ public:
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
: TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};
}
@@ -5202,6 +5207,12 @@ bool SystemZABIInfo::isCompoundType(QualType Ty) const {
isAggregateTypeForABI(Ty));
}
bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
return (HasVector &&
Ty->isVectorType() &&
getContext().getTypeSize(Ty) <= 128);
}
bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
@@ -5212,9 +5223,13 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
return false;
}
return false;
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
if (const RecordType *RT = Ty->getAsStructureType()) {
const RecordDecl *RD = RT->getDecl();
bool Found = false;
QualType Found;
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
@@ -5225,11 +5240,9 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
if (isEmptyRecord(getContext(), Base, true))
continue;
if (Found)
return false;
Found = isFPArgumentType(Base);
if (!Found)
return false;
if (!Found.isNull())
return Ty;
Found = GetSingleElementType(Base);
}
// Check the fields.
@@ -5242,20 +5255,19 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
continue;
// Unlike isSingleElementStruct(), arrays do not count.
// Nested isFPArgumentType structures still do though.
if (Found)
return false;
Found = isFPArgumentType(FD->getType());
if (!Found)
return false;
// Nested structures still do though.
if (!Found.isNull())
return Ty;
Found = GetSingleElementType(FD->getType());
}
// Unlike isSingleElementStruct(), trailing padding is allowed.
// An 8-byte aligned struct s { float f; } is passed as a double.
return Found;
if (!Found.isNull())
return Found;
}
return false;
return Ty;
}
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -5268,14 +5280,16 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// i8 *__reg_save_area;
// };
// Every argument occupies 8 bytes and is passed by preference in either
// GPRs or FPRs.
// Every non-vector argument occupies 8 bytes and is passed by preference
// in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
// always passed on the stack.
Ty = CGF.getContext().getCanonicalType(Ty);
llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy);
ABIArgInfo AI = classifyArgumentType(Ty);
bool IsIndirect = AI.isIndirect();
bool InFPRs = false;
bool IsVector = false;
unsigned UnpaddedBitSize;
if (IsIndirect) {
APTy = llvm::PointerType::getUnqual(APTy);
@@ -5284,14 +5298,38 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
if (AI.getCoerceToType())
ArgTy = AI.getCoerceToType();
InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
IsVector = ArgTy->isVectorTy();
UnpaddedBitSize = getContext().getTypeSize(Ty);
}
unsigned PaddedBitSize = 64;
unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64;
assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
unsigned PaddedSize = PaddedBitSize / 8;
unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
llvm::Type *IndexTy = CGF.Int64Ty;
llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
if (IsVector) {
// Work out the address of a vector argument on the stack.
// Vector arguments are always passed in the high bits of a
// single (8 byte) or double (16 byte) stack slot.
llvm::Value *OverflowArgAreaPtr =
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2,
"overflow_arg_area_ptr");
llvm::Value *OverflowArgArea =
CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
llvm::Value *MemAddr =
CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr");
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
return MemAddr;
}
unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
if (InFPRs) {
MaxRegs = 4; // Maximum of 4 FPR arguments
@@ -5308,7 +5346,6 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP(
nullptr, VAListAddr, RegCountField, "reg_count_ptr");
llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
llvm::Type *IndexTy = RegCount->getType();
llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
"fits_in_regs");
@@ -5322,7 +5359,6 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGF.EmitBlock(InRegBlock);
// Work out the address of an argument register.
llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
llvm::Value *ScaledRegCount =
CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
llvm::Value *RegBase =
@@ -5380,6 +5416,8 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (isVectorArgumentType(RetTy))
return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
return ABIArgInfo::getIndirect(0);
return (isPromotableIntegerType(RetTy) ?
@@ -5395,8 +5433,16 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
if (isPromotableIntegerType(Ty))
return ABIArgInfo::getExtend();
// Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
// Handle vector types and vector-like structure types. Note that
// as opposed to float-like structure types, we do not allow any
// padding for vector-like structures, so verify the sizes match.
uint64_t Size = getContext().getTypeSize(Ty);
QualType SingleElementTy = GetSingleElementType(Ty);
if (isVectorArgumentType(SingleElementTy) &&
getContext().getTypeSize(SingleElementTy) == Size)
return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
// Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
@@ -5410,7 +5456,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// The structure is passed as an unextended integer, a float, or a double.
llvm::Type *PassTy;
if (isFPArgumentType(Ty)) {
if (isFPArgumentType(SingleElementTy)) {
assert(Size == 32 || Size == 64);
if (Size == 32)
PassTy = llvm::Type::getFloatTy(getVMContext());
@@ -7067,8 +7113,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
case llvm::Triple::systemz:
return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
case llvm::Triple::systemz: {
bool HasVector = getTarget().getABI() == "vector";
return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types,
HasVector));
}
case llvm::Triple::tce:
return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));


@@ -1442,6 +1442,14 @@ static void getSystemZTargetFeatures(const ArgList &Args,
else
Features.push_back("-transactional-execution");
}
// -m(no-)vx overrides use of the vector facility.
if (Arg *A = Args.getLastArg(options::OPT_mvx,
options::OPT_mno_vx)) {
if (A->getOption().matches(options::OPT_mvx))
Features.push_back("+vector");
else
Features.push_back("-vector");
}
}
static const char *getX86TargetCPU(const ArgList &Args,


@@ -1,4 +1,9 @@
// RUN: %clang_cc1 -triple s390x-linux-gnu -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple s390x-linux-gnu \
// RUN: -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-feature +vector \
// RUN: -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-VECTOR %s
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 \
// RUN: -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-VECTOR %s
// Vector types
@@ -30,71 +35,153 @@ typedef __attribute__((vector_size(16))) long double v1f128;
typedef __attribute__((vector_size(32))) char v32i8;
unsigned int align = __alignof__ (v16i8);
// CHECK: @align = global i32 16
// CHECK-VECTOR: @align = global i32 8
v1i8 pass_v1i8(v1i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1i8(<1 x i8>* noalias sret %{{.*}}, <1 x i8>*)
// CHECK-VECTOR-LABEL: define <1 x i8> @pass_v1i8(<1 x i8> %{{.*}})
v2i8 pass_v2i8(v2i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_v2i8(<2 x i8>* noalias sret %{{.*}}, <2 x i8>*)
// CHECK-VECTOR-LABEL: define <2 x i8> @pass_v2i8(<2 x i8> %{{.*}})
v4i8 pass_v4i8(v4i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_v4i8(<4 x i8>* noalias sret %{{.*}}, <4 x i8>*)
// CHECK-VECTOR-LABEL: define <4 x i8> @pass_v4i8(<4 x i8> %{{.*}})
v8i8 pass_v8i8(v8i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_v8i8(<8 x i8>* noalias sret %{{.*}}, <8 x i8>*)
// CHECK-VECTOR-LABEL: define <8 x i8> @pass_v8i8(<8 x i8> %{{.*}})
v16i8 pass_v16i8(v16i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_v16i8(<16 x i8>* noalias sret %{{.*}}, <16 x i8>*)
// CHECK-VECTOR-LABEL: define <16 x i8> @pass_v16i8(<16 x i8> %{{.*}})
v32i8 pass_v32i8(v32i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_v32i8(<32 x i8>* noalias sret %{{.*}}, <32 x i8>*)
// CHECK-VECTOR-LABEL: define void @pass_v32i8(<32 x i8>* noalias sret %{{.*}}, <32 x i8>*)
v1i16 pass_v1i16(v1i16 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1i16(<1 x i16>* noalias sret %{{.*}}, <1 x i16>*)
// CHECK-VECTOR-LABEL: define <1 x i16> @pass_v1i16(<1 x i16> %{{.*}})
v2i16 pass_v2i16(v2i16 arg) { return arg; }
// CHECK-LABEL: define void @pass_v2i16(<2 x i16>* noalias sret %{{.*}}, <2 x i16>*)
// CHECK-VECTOR-LABEL: define <2 x i16> @pass_v2i16(<2 x i16> %{{.*}})
v4i16 pass_v4i16(v4i16 arg) { return arg; }
// CHECK-LABEL: define void @pass_v4i16(<4 x i16>* noalias sret %{{.*}}, <4 x i16>*)
// CHECK-VECTOR-LABEL: define <4 x i16> @pass_v4i16(<4 x i16> %{{.*}})
v8i16 pass_v8i16(v8i16 arg) { return arg; }
// CHECK-LABEL: define void @pass_v8i16(<8 x i16>* noalias sret %{{.*}}, <8 x i16>*)
// CHECK-VECTOR-LABEL: define <8 x i16> @pass_v8i16(<8 x i16> %{{.*}})
v1i32 pass_v1i32(v1i32 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1i32(<1 x i32>* noalias sret %{{.*}}, <1 x i32>*)
// CHECK-VECTOR-LABEL: define <1 x i32> @pass_v1i32(<1 x i32> %{{.*}})
v2i32 pass_v2i32(v2i32 arg) { return arg; }
// CHECK-LABEL: define void @pass_v2i32(<2 x i32>* noalias sret %{{.*}}, <2 x i32>*)
// CHECK-VECTOR-LABEL: define <2 x i32> @pass_v2i32(<2 x i32> %{{.*}})
v4i32 pass_v4i32(v4i32 arg) { return arg; }
// CHECK-LABEL: define void @pass_v4i32(<4 x i32>* noalias sret %{{.*}}, <4 x i32>*)
// CHECK-VECTOR-LABEL: define <4 x i32> @pass_v4i32(<4 x i32> %{{.*}})
v1i64 pass_v1i64(v1i64 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1i64(<1 x i64>* noalias sret %{{.*}}, <1 x i64>*)
// CHECK-VECTOR-LABEL: define <1 x i64> @pass_v1i64(<1 x i64> %{{.*}})
v2i64 pass_v2i64(v2i64 arg) { return arg; }
// CHECK-LABEL: define void @pass_v2i64(<2 x i64>* noalias sret %{{.*}}, <2 x i64>*)
// CHECK-VECTOR-LABEL: define <2 x i64> @pass_v2i64(<2 x i64> %{{.*}})
v1i128 pass_v1i128(v1i128 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1i128(<1 x i128>* noalias sret %{{.*}}, <1 x i128>*)
// CHECK-VECTOR-LABEL: define <1 x i128> @pass_v1i128(<1 x i128> %{{.*}})
v1f32 pass_v1f32(v1f32 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1f32(<1 x float>* noalias sret %{{.*}}, <1 x float>*)
// CHECK-VECTOR-LABEL: define <1 x float> @pass_v1f32(<1 x float> %{{.*}})
v2f32 pass_v2f32(v2f32 arg) { return arg; }
// CHECK-LABEL: define void @pass_v2f32(<2 x float>* noalias sret %{{.*}}, <2 x float>*)
// CHECK-VECTOR-LABEL: define <2 x float> @pass_v2f32(<2 x float> %{{.*}})
v4f32 pass_v4f32(v4f32 arg) { return arg; }
// CHECK-LABEL: define void @pass_v4f32(<4 x float>* noalias sret %{{.*}}, <4 x float>*)
// CHECK-VECTOR-LABEL: define <4 x float> @pass_v4f32(<4 x float> %{{.*}})
v1f64 pass_v1f64(v1f64 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1f64(<1 x double>* noalias sret %{{.*}}, <1 x double>*)
// CHECK-VECTOR-LABEL: define <1 x double> @pass_v1f64(<1 x double> %{{.*}})
v2f64 pass_v2f64(v2f64 arg) { return arg; }
// CHECK-LABEL: define void @pass_v2f64(<2 x double>* noalias sret %{{.*}}, <2 x double>*)
// CHECK-VECTOR-LABEL: define <2 x double> @pass_v2f64(<2 x double> %{{.*}})
v1f128 pass_v1f128(v1f128 arg) { return arg; }
// CHECK-LABEL: define void @pass_v1f128(<1 x fp128>* noalias sret %{{.*}}, <1 x fp128>*)
// CHECK-VECTOR-LABEL: define <1 x fp128> @pass_v1f128(<1 x fp128> %{{.*}})
// Vector-like aggregate types
struct agg_v1i8 { v1i8 a; };
struct agg_v1i8 pass_agg_v1i8(struct agg_v1i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_v1i8(%struct.agg_v1i8* noalias sret %{{.*}}, i8 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_v1i8(%struct.agg_v1i8* noalias sret %{{.*}}, <1 x i8> %{{.*}})
struct agg_v2i8 { v2i8 a; };
struct agg_v2i8 pass_agg_v2i8(struct agg_v2i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_v2i8(%struct.agg_v2i8* noalias sret %{{.*}}, i16 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_v2i8(%struct.agg_v2i8* noalias sret %{{.*}}, <2 x i8> %{{.*}})
struct agg_v4i8 { v4i8 a; };
struct agg_v4i8 pass_agg_v4i8(struct agg_v4i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_v4i8(%struct.agg_v4i8* noalias sret %{{.*}}, i32 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_v4i8(%struct.agg_v4i8* noalias sret %{{.*}}, <4 x i8> %{{.*}})
struct agg_v8i8 { v8i8 a; };
struct agg_v8i8 pass_agg_v8i8(struct agg_v8i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_v8i8(%struct.agg_v8i8* noalias sret %{{.*}}, i64 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_v8i8(%struct.agg_v8i8* noalias sret %{{.*}}, <8 x i8> %{{.*}})
struct agg_v16i8 { v16i8 a; };
struct agg_v16i8 pass_agg_v16i8(struct agg_v16i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_v16i8(%struct.agg_v16i8* noalias sret %{{.*}}, %struct.agg_v16i8* %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_v16i8(%struct.agg_v16i8* noalias sret %{{.*}}, <16 x i8> %{{.*}})
struct agg_v32i8 { v32i8 a; };
struct agg_v32i8 pass_agg_v32i8(struct agg_v32i8 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_v32i8(%struct.agg_v32i8* noalias sret %{{.*}}, %struct.agg_v32i8* %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_v32i8(%struct.agg_v32i8* noalias sret %{{.*}}, %struct.agg_v32i8* %{{.*}})
// Verify that the following are *not* vector-like aggregate types
struct agg_novector1 { v4i8 a; v4i8 b; };
struct agg_novector1 pass_agg_novector1(struct agg_novector1 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_novector1(%struct.agg_novector1* noalias sret %{{.*}}, i64 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_novector1(%struct.agg_novector1* noalias sret %{{.*}}, i64 %{{.*}})
struct agg_novector2 { v4i8 a; float b; };
struct agg_novector2 pass_agg_novector2(struct agg_novector2 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_novector2(%struct.agg_novector2* noalias sret %{{.*}}, i64 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_novector2(%struct.agg_novector2* noalias sret %{{.*}}, i64 %{{.*}})
struct agg_novector3 { v4i8 a; int : 0; };
struct agg_novector3 pass_agg_novector3(struct agg_novector3 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_novector3(%struct.agg_novector3* noalias sret %{{.*}}, i32 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_novector3(%struct.agg_novector3* noalias sret %{{.*}}, i32 %{{.*}})
struct agg_novector4 { v4i8 a __attribute__((aligned (8))); };
struct agg_novector4 pass_agg_novector4(struct agg_novector4 arg) { return arg; }
// CHECK-LABEL: define void @pass_agg_novector4(%struct.agg_novector4* noalias sret %{{.*}}, i64 %{{.*}})
// CHECK-VECTOR-LABEL: define void @pass_agg_novector4(%struct.agg_novector4* noalias sret %{{.*}}, i64 %{{.*}})
// Accessing variable argument lists
@@ -122,6 +209,14 @@ v1i8 va_v1i8(__builtin_va_list l) { return __builtin_va_arg(l, v1i8); }
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi <1 x i8>** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load <1 x i8>*, <1 x i8>** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define <1 x i8> @va_v1i8(%struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to <1 x i8>*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[RET:%[^ ]+]] = load <1 x i8>, <1 x i8>* [[MEM_ADDR]]
// CHECK-VECTOR: ret <1 x i8> [[RET]]
v2i8 va_v2i8(__builtin_va_list l) { return __builtin_va_arg(l, v2i8); }
// CHECK-LABEL: define void @va_v2i8(<2 x i8>* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
@@ -146,6 +241,14 @@ v2i8 va_v2i8(__builtin_va_list l) { return __builtin_va_arg(l, v2i8); }
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi <2 x i8>** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load <2 x i8>*, <2 x i8>** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define <2 x i8> @va_v2i8(%struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to <2 x i8>*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[RET:%[^ ]+]] = load <2 x i8>, <2 x i8>* [[MEM_ADDR]]
// CHECK-VECTOR: ret <2 x i8> [[RET]]
v4i8 va_v4i8(__builtin_va_list l) { return __builtin_va_arg(l, v4i8); }
// CHECK-LABEL: define void @va_v4i8(<4 x i8>* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
@@ -170,6 +273,14 @@ v4i8 va_v4i8(__builtin_va_list l) { return __builtin_va_arg(l, v4i8); }
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi <4 x i8>** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load <4 x i8>*, <4 x i8>** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define <4 x i8> @va_v4i8(%struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to <4 x i8>*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[RET:%[^ ]+]] = load <4 x i8>, <4 x i8>* [[MEM_ADDR]]
// CHECK-VECTOR: ret <4 x i8> [[RET]]
v8i8 va_v8i8(__builtin_va_list l) { return __builtin_va_arg(l, v8i8); }
// CHECK-LABEL: define void @va_v8i8(<8 x i8>* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
@@ -194,6 +305,14 @@ v8i8 va_v8i8(__builtin_va_list l) { return __builtin_va_arg(l, v8i8); }
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi <8 x i8>** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load <8 x i8>*, <8 x i8>** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define <8 x i8> @va_v8i8(%struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to <8 x i8>*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[RET:%[^ ]+]] = load <8 x i8>, <8 x i8>* [[MEM_ADDR]]
// CHECK-VECTOR: ret <8 x i8> [[RET]]
v16i8 va_v16i8(__builtin_va_list l) { return __builtin_va_arg(l, v16i8); }
// CHECK-LABEL: define void @va_v16i8(<16 x i8>* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
@@ -218,6 +337,14 @@ v16i8 va_v16i8(__builtin_va_list l) { return __builtin_va_arg(l, v16i8); }
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi <16 x i8>** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load <16 x i8>*, <16 x i8>** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define <16 x i8> @va_v16i8(%struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to <16 x i8>*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 16
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[RET:%[^ ]+]] = load <16 x i8>, <16 x i8>* [[MEM_ADDR]]
// CHECK-VECTOR: ret <16 x i8> [[RET]]
v32i8 va_v32i8(__builtin_va_list l) { return __builtin_va_arg(l, v32i8); }
// CHECK-LABEL: define void @va_v32i8(<32 x i8>* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
@@ -242,4 +369,222 @@ v32i8 va_v32i8(__builtin_va_list l) { return __builtin_va_arg(l, v32i8); }
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi <32 x i8>** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load <32 x i8>*, <32 x i8>** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define void @va_v32i8(<32 x i8>* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK-VECTOR: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK-VECTOR: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK-VECTOR: br i1 [[FITS_IN_REGS]],
// CHECK-VECTOR: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK-VECTOR: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16
// CHECK-VECTOR: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK-VECTOR: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK-VECTOR: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK-VECTOR: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to <32 x i8>**
// CHECK-VECTOR: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK-VECTOR: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to <32 x i8>**
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[VA_ARG_ADDR:%[^ ]+]] = phi <32 x i8>** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK-VECTOR: [[INDIRECT_ARG:%[^ ]+]] = load <32 x i8>*, <32 x i8>** [[VA_ARG_ADDR]]
// CHECK-VECTOR: ret void
struct agg_v1i8 va_agg_v1i8(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_v1i8); }
// CHECK-LABEL: define void @va_agg_v1i8(%struct.agg_v1i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK: br i1 [[FITS_IN_REGS]],
// CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 23
// CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_v1i8*
// CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 7
// CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_v1i8*
// CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_v1i8* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define void @va_agg_v1i8(%struct.agg_v1i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to %struct.agg_v1i8*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: ret void
struct agg_v2i8 va_agg_v2i8(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_v2i8); }
// CHECK-LABEL: define void @va_agg_v2i8(%struct.agg_v2i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK: br i1 [[FITS_IN_REGS]],
// CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 22
// CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_v2i8*
// CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 6
// CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_v2i8*
// CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_v2i8* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define void @va_agg_v2i8(%struct.agg_v2i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to %struct.agg_v2i8*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: ret void
struct agg_v4i8 va_agg_v4i8(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_v4i8); }
// CHECK-LABEL: define void @va_agg_v4i8(%struct.agg_v4i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK: br i1 [[FITS_IN_REGS]],
// CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 20
// CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_v4i8*
// CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 4
// CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_v4i8*
// CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_v4i8* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define void @va_agg_v4i8(%struct.agg_v4i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to %struct.agg_v4i8*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: ret void
struct agg_v8i8 va_agg_v8i8(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_v8i8); }
// CHECK-LABEL: define void @va_agg_v8i8(%struct.agg_v8i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK: br i1 [[FITS_IN_REGS]],
// CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16
// CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_v8i8*
// CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0
// CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_v8i8*
// CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_v8i8* [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define void @va_agg_v8i8(%struct.agg_v8i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to %struct.agg_v8i8*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: ret void
struct agg_v16i8 va_agg_v16i8(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_v16i8); }
// CHECK-LABEL: define void @va_agg_v16i8(%struct.agg_v16i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK: br i1 [[FITS_IN_REGS]],
// CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16
// CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_v16i8**
// CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0
// CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_v16i8**
// CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_v16i8** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load %struct.agg_v16i8*, %struct.agg_v16i8** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define void @va_agg_v16i8(%struct.agg_v16i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[OVERFLOW_ARG_AREA]] to %struct.agg_v16i8*
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA1:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 16
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA1]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: ret void
struct agg_v32i8 va_agg_v32i8(__builtin_va_list l) { return __builtin_va_arg(l, struct agg_v32i8); }
// CHECK-LABEL: define void @va_agg_v32i8(%struct.agg_v32i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK: br i1 [[FITS_IN_REGS]],
// CHECK: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16
// CHECK: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_v32i8**
// CHECK: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0
// CHECK: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_v32i8**
// CHECK: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_v32i8** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK: [[INDIRECT_ARG:%[^ ]+]] = load %struct.agg_v32i8*, %struct.agg_v32i8** [[VA_ARG_ADDR]]
// CHECK: ret void
// CHECK-VECTOR-LABEL: define void @va_agg_v32i8(%struct.agg_v32i8* noalias sret %{{.*}}, %struct.__va_list_tag* %{{.*}})
// CHECK-VECTOR: [[REG_COUNT_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 0
// CHECK-VECTOR: [[REG_COUNT:%[^ ]+]] = load i64, i64* [[REG_COUNT_PTR]]
// CHECK-VECTOR: [[FITS_IN_REGS:%[^ ]+]] = icmp ult i64 [[REG_COUNT]], 5
// CHECK-VECTOR: br i1 [[FITS_IN_REGS]],
// CHECK-VECTOR: [[SCALED_REG_COUNT:%[^ ]+]] = mul i64 [[REG_COUNT]], 8
// CHECK-VECTOR: [[REG_OFFSET:%[^ ]+]] = add i64 [[SCALED_REG_COUNT]], 16
// CHECK-VECTOR: [[REG_SAVE_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 3
// CHECK-VECTOR: [[REG_SAVE_AREA:%[^ ]+]] = load i8*, i8** [[REG_SAVE_AREA_PTR:[^ ]+]]
// CHECK-VECTOR: [[RAW_REG_ADDR:%[^ ]+]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i64 [[REG_OFFSET]]
// CHECK-VECTOR: [[REG_ADDR:%[^ ]+]] = bitcast i8* [[RAW_REG_ADDR]] to %struct.agg_v32i8**
// CHECK-VECTOR: [[REG_COUNT1:%[^ ]+]] = add i64 [[REG_COUNT]], 1
// CHECK-VECTOR: store i64 [[REG_COUNT1]], i64* [[REG_COUNT_PTR]]
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA_PTR:%[^ ]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* %{{.*}}, i32 0, i32 2
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA:%[^ ]+]] = load i8*, i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[RAW_MEM_ADDR:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 0
// CHECK-VECTOR: [[MEM_ADDR:%[^ ]+]] = bitcast i8* [[RAW_MEM_ADDR]] to %struct.agg_v32i8**
// CHECK-VECTOR: [[OVERFLOW_ARG_AREA2:%[^ ]+]] = getelementptr i8, i8* [[OVERFLOW_ARG_AREA]], i64 8
// CHECK-VECTOR: store i8* [[OVERFLOW_ARG_AREA2]], i8** [[OVERFLOW_ARG_AREA_PTR]]
// CHECK-VECTOR: [[VA_ARG_ADDR:%[^ ]+]] = phi %struct.agg_v32i8** [ [[REG_ADDR]], %{{.*}} ], [ [[MEM_ADDR]], %{{.*}} ]
// CHECK-VECTOR: [[INDIRECT_ARG:%[^ ]+]] = load %struct.agg_v32i8*, %struct.agg_v32i8** [[VA_ARG_ADDR]]
// CHECK-VECTOR: ret void


@@ -1,4 +1,9 @@
// RUN: %clang_cc1 -triple s390x-linux-gnu -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple s390x-linux-gnu \
// RUN: -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-feature +vector \
// RUN: -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 \
// RUN: -emit-llvm -o - %s | FileCheck %s
// Scalar types


@@ -155,6 +155,10 @@
// RUN: FileCheck %s -check-prefix=SYSTEMZ
// SYSTEMZ: target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
// RUN: %clang_cc1 -triple s390x-unknown -target-cpu z13 -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=SYSTEMZ-VECTOR
// SYSTEMZ-VECTOR: target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
// RUN: %clang_cc1 -triple msp430-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=MSP430
// MSP430: target datalayout = "e-m:e-p:16:16-i32:16:32-a:16-n8:16"


@@ -2,6 +2,8 @@
// RUN: %clang -target s390x-unknown-linux-gnu %s -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-DEFAULT %s
// CHECK-DEFAULT-NOT: "-target-feature" "+transactional-execution"
// CHECK-DEFAULT-NOT: "-target-feature" "-transactional-execution"
// CHECK-DEFAULT-NOT: "-target-feature" "+vector"
// CHECK-DEFAULT-NOT: "-target-feature" "-vector"
// RUN: %clang -target s390x-unknown-linux-gnu %s -mhtm -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-HTM %s
// RUN: %clang -target s390x-unknown-linux-gnu %s -mno-htm -mhtm -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-HTM %s
@@ -13,3 +15,12 @@
// CHECK-NOHTM: "-target-feature" "-transactional-execution"
// CHECK-NOHTM-NOT: "-target-feature" "+transactional-execution"
// RUN: %clang -target s390x-unknown-linux-gnu %s -mvx -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-VX %s
// RUN: %clang -target s390x-unknown-linux-gnu %s -mno-vx -mvx -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-VX %s
// CHECK-VX: "-target-feature" "+vector"
// CHECK-VX-NOT: "-target-feature" "-vector"
//
// RUN: %clang -target s390x-unknown-linux-gnu %s -mno-vx -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-NOVX %s
// RUN: %clang -target s390x-unknown-linux-gnu %s -mvx -mno-vx -### -o %t.o 2>&1 | FileCheck -check-prefix=CHECK-NOVX %s
// CHECK-NOVX: "-target-feature" "-vector"
// CHECK-NOVX-NOT: "-target-feature" "+vector"