2008-09-09 05:33:45 +08:00
|
|
|
//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// These classes wrap the information about a call or function
|
|
|
|
// definition used to handle ABI compliancy.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "CGCall.h"
|
|
|
|
#include "CodeGenFunction.h"
|
2008-09-10 08:41:16 +08:00
|
|
|
#include "CodeGenModule.h"
|
2008-10-14 01:02:26 +08:00
|
|
|
#include "clang/Basic/TargetInfo.h"
|
2008-09-09 05:33:45 +08:00
|
|
|
#include "clang/AST/Decl.h"
|
2009-04-04 06:48:58 +08:00
|
|
|
#include "clang/AST/DeclCXX.h"
|
2008-09-09 05:33:45 +08:00
|
|
|
#include "clang/AST/DeclObjC.h"
|
2010-06-16 07:19:56 +08:00
|
|
|
#include "clang/Frontend/CodeGenOptions.h"
|
2008-09-24 09:01:36 +08:00
|
|
|
#include "llvm/Attributes.h"
|
2009-03-02 12:32:35 +08:00
|
|
|
#include "llvm/Support/CallSite.h"
|
2009-01-27 09:36:03 +08:00
|
|
|
#include "llvm/Target/TargetData.h"
|
2009-02-03 09:05:53 +08:00
|
|
|
|
|
|
|
#include "ABIInfo.h"
|
|
|
|
|
2008-09-09 05:33:45 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
|
|
|
/***/
|
|
|
|
|
|
|
|
// FIXME: Use iterator and sidestep silly type array creation.
|
|
|
|
|
2010-02-06 05:31:56 +08:00
|
|
|
/// Map a Clang calling convention onto the matching LLVM calling convention.
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  case CC_X86StdCall:  return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  default:
    // Everything else (including CC_C) lowers to the plain C convention.
    return llvm::CallingConv::C;
  }
}
|
|
|
|
|
2010-02-24 15:14:12 +08:00
|
|
|
/// Derives the 'this' type for codegen purposes, i.e. ignoring method
|
|
|
|
/// qualification.
|
|
|
|
/// FIXME: address space qualification?
|
2010-02-26 08:48:12 +08:00
|
|
|
/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  // Canonicalize the record type, then form a pointer to it.
  QualType RecordTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecordTy));
}
|
|
|
|
|
|
|
|
/// Returns the canonical formal type of the given C++ method.
|
2010-02-26 08:48:12 +08:00
|
|
|
/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  CanQualType FnTy = MD->getType()->getCanonicalTypeUnqualified();
  return FnTy.getAs<FunctionProtoType>();
}
|
|
|
|
|
|
|
|
/// Returns the "extra-canonicalized" return type, which discards
|
|
|
|
/// qualifiers on the return type. Codegen doesn't care about them,
|
|
|
|
/// and it makes ABI code a little easier to be able to assume that
|
|
|
|
/// all parameter and return types are top-level unqualified.
|
2010-02-26 08:48:12 +08:00
|
|
|
/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  CanQualType CanRetTy = RetTy->getCanonicalTypeUnqualified();
  return CanRetTy.getUnqualifiedType();
}
|
|
|
|
|
|
|
|
/// Build the function info for a K&R-style (no-prototype) function type.
const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
  // A function with no prototype declares no formal parameters, so hand the
  // core routine an empty argument list.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         ArgTys, FTNP->getExtInfo());
}
|
|
|
|
|
2010-02-24 15:14:12 +08:00
|
|
|
/// \param Args - contains any initial parameters besides those
|
|
|
|
/// in the formal type
|
|
|
|
/// \param ArgTys - contains any initial parameters besides those
/// in the formal type; the formal parameters are appended to it.
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                 llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP) {
  // FIXME: Kill copy.
  unsigned NumArgs = FTP->getNumArgs();
  for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
    ArgTys.push_back(FTP->getArgType(Arg));
  // Qualifiers on the return type are dropped for codegen purposes.
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}
|
|
|
|
|
|
|
|
/// Build the function info for a prototyped function type.
const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  // Delegate to the file-local helper, which appends the formal parameters.
  return ::getFunctionInfo(*this, ArgTys, FTP);
}
|
|
|
|
|
2010-02-06 05:31:56 +08:00
|
|
|
/// Determine the calling convention a declaration requests via its
/// attributes; defaults to the C convention when none is present.
static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Check the convention attributes in order; the first match wins.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;
  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;
  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  // No explicit attribute: use the default C convention.
  return CC_C;
}
|
|
|
|
|
2009-10-04 03:43:08 +08:00
|
|
|
/// Build the function info for a member function of record \p RD with the
/// given prototype, injecting the implicit 'this' parameter.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // The 'this' pointer is always the first argument.
  ArgTys.push_back(GetThisType(Context, RD));

  CanQual<FunctionProtoType> CanFTP =
    FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>();
  return ::getFunctionInfo(*this, ArgTys, CanFTP);
}
|
|
|
|
|
2009-04-04 06:48:58 +08:00
|
|
|
/// Build the function info for a C++ method declaration.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Instance methods take an implicit leading 'this' argument; static
  // member functions do not.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}
|
|
|
|
|
2009-11-25 11:15:49 +08:00
|
|
|
/// Build the function info for a constructor variant.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // 'this' always comes first.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Base-object constructors of classes with virtual bases take an implicit
  // VTT parameter (which has type void **).
  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}
|
|
|
|
|
|
|
|
/// Build the function info for a destructor variant.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // 'this' always comes first.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Base-object destructors of classes with virtual bases take an implicit
  // VTT parameter (which has type void **).
  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}
|
|
|
|
|
2009-02-03 07:23:47 +08:00
|
|
|
/// Build the function info for an arbitrary function declaration,
/// dispatching to the method/proto/no-proto overloads as appropriate.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  // Instance C++ methods need the implicit 'this' argument handled.
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));

  // Dispatch on whether the function carries a prototype.
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());

  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}
|
|
|
|
|
2009-02-03 07:23:47 +08:00
|
|
|
/// Build the function info for an Objective-C method.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Objective-C methods take the implicit 'self' and '_cmd' arguments first.
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));

  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator I = MD->param_begin(),
         E = MD->param_end(); I != E; ++I)
    ArgTys.push_back(Context.getCanonicalParamType((*I)->getType()));

  // ObjC methods carry no ExtInfo of their own; derive the calling
  // convention from the declaration's attributes.
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}
|
|
|
|
|
2010-02-06 10:44:09 +08:00
|
|
|
/// Build the function info for a GlobalDecl, taking care of the
/// constructor/destructor variant encoded in it.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Constructors and destructors need the variant (complete/base) that the
  // GlobalDecl carries.
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());
  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
/// Build the function info for a call site described by a CallArgList.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator I = Args.begin(), E = Args.end();
       I != E; ++I)
    ArgTys.push_back(Context.getCanonicalParamType(I->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
/// Build the function info for a definition described by a FunctionArgList.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
       I != E; ++I)
    ArgTys.push_back(Context.getCanonicalParamType(I->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}
|
|
|
|
|
2010-02-26 08:48:12 +08:00
|
|
|
/// Core entry point: find or create the unique CGFunctionInfo for the
/// given canonical result/argument types and extended info.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
  // Every argument type must already be canonicalized as a parameter.
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Not cached yet: construct the function info and remember it.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(),
                          ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());

  return *FI;
}
|
2008-09-10 07:27:19 +08:00
|
|
|
|
2009-09-12 06:24:53 +08:00
|
|
|
/// Construct a CGFunctionInfo, storing the return type in Args[0] and the
/// argument types in Args[1..NumArgs].
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn,
                               unsigned _RegParm,
                               CanQualType ResTy,
                               const llvm::SmallVectorImpl<CanQualType> &ArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), RegParm(_RegParm) {
  NumArgs = ArgTys.size();
  // Slot 0 is reserved for the return type; arguments follow it.
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned Idx = 0; Idx != NumArgs; ++Idx)
    Args[1 + Idx].type = ArgTys[Idx];
}
|
|
|
|
|
|
|
|
/***/
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
void CodeGenTypes::GetExpandedTypes(QualType Ty,
|
2008-09-17 08:51:38 +08:00
|
|
|
std::vector<const llvm::Type*> &ArgTys) {
|
|
|
|
const RecordType *RT = Ty->getAsStructureType();
|
|
|
|
assert(RT && "Can only expand structure types.");
|
|
|
|
const RecordDecl *RD = RT->getDecl();
|
2009-09-09 23:08:12 +08:00
|
|
|
assert(!RD->hasFlexibleArrayMember() &&
|
2008-09-17 08:51:38 +08:00
|
|
|
"Cannot expand structure with flexible array.");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-06-30 10:36:12 +08:00
|
|
|
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
|
|
|
|
i != e; ++i) {
|
2008-09-17 08:51:38 +08:00
|
|
|
const FieldDecl *FD = *i;
|
2009-09-09 23:08:12 +08:00
|
|
|
assert(!FD->isBitField() &&
|
2008-09-17 08:51:38 +08:00
|
|
|
"Cannot expand structure with bit-field members.");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-09-17 08:51:38 +08:00
|
|
|
QualType FT = FD->getType();
|
|
|
|
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
|
|
|
|
GetExpandedTypes(FT, ArgTys);
|
|
|
|
} else {
|
|
|
|
ArgTys.push_back(ConvertType(FT));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
/// Reassemble an expanded structure argument: store successive function
/// arguments into the fields of the lvalue \p LV, returning the iterator
/// positioned past the consumed arguments.
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I) {
    FieldDecl *FD = *I;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue FieldLV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Aggregate fields consume multiple arguments, recursively.
      AI = ExpandTypeFromArgs(FT, FieldLV, AI);
    } else {
      // Scalar fields consume exactly one argument.
      EmitStoreThroughLValue(RValue::get(AI), FieldLV, FT);
      ++AI;
    }
  }

  return AI;
}
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
void
|
|
|
|
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
|
2008-09-17 08:51:38 +08:00
|
|
|
llvm::SmallVector<llvm::Value*, 16> &Args) {
|
|
|
|
const RecordType *RT = Ty->getAsStructureType();
|
|
|
|
assert(RT && "Can only expand structure types.");
|
|
|
|
|
|
|
|
RecordDecl *RD = RT->getDecl();
|
|
|
|
assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
|
|
|
|
llvm::Value *Addr = RV.getAggregateAddr();
|
2009-06-30 10:36:12 +08:00
|
|
|
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
|
|
|
|
i != e; ++i) {
|
2009-09-09 23:08:12 +08:00
|
|
|
FieldDecl *FD = *i;
|
2008-09-17 08:51:38 +08:00
|
|
|
QualType FT = FD->getType();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-09-17 08:51:38 +08:00
|
|
|
// FIXME: What are the right qualifiers here?
|
2010-01-29 13:05:36 +08:00
|
|
|
LValue LV = EmitLValueForField(Addr, FD, 0);
|
2008-09-17 08:51:38 +08:00
|
|
|
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
|
|
|
|
ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
|
|
|
|
} else {
|
|
|
|
RValue RV = EmitLoadOfLValue(LV, FT);
|
2009-09-09 23:08:12 +08:00
|
|
|
assert(RV.isScalar() &&
|
2008-09-17 08:51:38 +08:00
|
|
|
"Unexpected non-scalar rvalue during struct expansion.");
|
|
|
|
Args.push_back(RV.getScalarVal());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-03 03:06:38 +08:00
|
|
|
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
|
|
|
|
/// a pointer to an object of type \arg Ty.
|
|
|
|
///
|
|
|
|
/// This safely handles the case when the src type is smaller than the
|
|
|
|
/// destination type; in this situation the values of bits which not
|
|
|
|
/// present in the src are undefined.
|
|
|
|
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
|
|
|
|
const llvm::Type *Ty,
|
|
|
|
CodeGenFunction &CGF) {
|
2009-09-09 23:08:12 +08:00
|
|
|
const llvm::Type *SrcTy =
|
2009-02-03 03:06:38 +08:00
|
|
|
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
|
2009-05-09 15:08:47 +08:00
|
|
|
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
|
|
|
|
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
|
2009-02-03 03:06:38 +08:00
|
|
|
|
2009-02-03 13:59:18 +08:00
|
|
|
// If load is legal, just bitcast the src pointer.
|
2009-05-14 02:54:26 +08:00
|
|
|
if (SrcSize >= DstSize) {
|
2009-05-16 15:57:57 +08:00
|
|
|
// Generally SrcSize is never greater than DstSize, since this means we are
|
|
|
|
// losing bits. However, this can happen in cases where the structure has
|
|
|
|
// additional padding, for example due to a user specified alignment.
|
2009-05-14 02:54:26 +08:00
|
|
|
//
|
2009-05-16 15:57:57 +08:00
|
|
|
// FIXME: Assert that we aren't truncating non-padding bits when have access
|
|
|
|
// to that information.
|
2009-02-03 03:06:38 +08:00
|
|
|
llvm::Value *Casted =
|
|
|
|
CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
|
2009-02-07 10:46:03 +08:00
|
|
|
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
|
|
|
|
// FIXME: Use better alignment / avoid requiring aligned load.
|
|
|
|
Load->setAlignment(1);
|
|
|
|
return Load;
|
2009-02-03 03:06:38 +08:00
|
|
|
}
|
Change IR generation for return (in the simple case) to avoid doing silly
load/store nonsense in the epilog. For example, for:
int foo(int X) {
int A[100];
return A[X];
}
we used to generate:
%arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
%tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
store i32 %tmp1, i32* %retval
%0 = load i32* %retval ; <i32> [#uses=1]
ret i32 %0
}
which codegen'd to this code:
_foo: ## @foo
## BB#0: ## %entry
subq $408, %rsp ## imm = 0x198
movl %edi, 400(%rsp)
movl 400(%rsp), %edi
movslq %edi, %rax
movl (%rsp,%rax,4), %edi
movl %edi, 404(%rsp)
movl 404(%rsp), %eax
addq $408, %rsp ## imm = 0x198
ret
Now we generate:
%arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
%tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
ret i32 %tmp1
}
and:
_foo: ## @foo
## BB#0: ## %entry
subq $408, %rsp ## imm = 0x198
movl %edi, 404(%rsp)
movl 404(%rsp), %edi
movslq %edi, %rax
movl (%rsp,%rax,4), %eax
addq $408, %rsp ## imm = 0x198
ret
This actually does matter, cutting out 2000 lines of IR from CGStmt.ll
for example.
Another interesting effect is that altivec.h functions which are dead
now get dce'd by the inliner. Hence all the changes to
builtins-ppc-altivec.c to ensure the calls aren't dead.
llvm-svn: 106970
2010-06-27 09:06:27 +08:00
|
|
|
|
|
|
|
// Otherwise do coercion through memory. This is stupid, but
|
|
|
|
// simple.
|
|
|
|
llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
|
|
|
|
llvm::Value *Casted =
|
|
|
|
CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
|
|
|
|
llvm::StoreInst *Store =
|
|
|
|
CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
|
|
|
|
// FIXME: Use better alignment / avoid requiring aligned store.
|
|
|
|
Store->setAlignment(1);
|
|
|
|
return CGF.Builder.CreateLoad(Tmp);
|
2009-02-03 03:06:38 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
|
|
|
|
/// where the source and destination may have different types.
|
|
|
|
///
|
|
|
|
/// This safely handles the case when the src type is larger than the
|
|
|
|
/// destination type; the upper bits of the src will be lost.
|
|
|
|
static void CreateCoercedStore(llvm::Value *Src,
|
|
|
|
llvm::Value *DstPtr,
|
2009-12-25 04:40:36 +08:00
|
|
|
bool DstIsVolatile,
|
2009-02-03 03:06:38 +08:00
|
|
|
CodeGenFunction &CGF) {
|
|
|
|
const llvm::Type *SrcTy = Src->getType();
|
2009-09-09 23:08:12 +08:00
|
|
|
const llvm::Type *DstTy =
|
2009-02-03 03:06:38 +08:00
|
|
|
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
|
|
|
|
|
2009-05-09 15:08:47 +08:00
|
|
|
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
|
|
|
|
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
|
2009-02-03 03:06:38 +08:00
|
|
|
|
2009-02-03 13:31:23 +08:00
|
|
|
// If store is legal, just bitcast the src pointer.
|
2009-06-05 15:58:54 +08:00
|
|
|
if (SrcSize <= DstSize) {
|
2009-02-03 03:06:38 +08:00
|
|
|
llvm::Value *Casted =
|
|
|
|
CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
|
2009-02-07 10:46:03 +08:00
|
|
|
// FIXME: Use better alignment / avoid requiring aligned store.
|
2009-12-25 04:40:36 +08:00
|
|
|
CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
|
2009-02-03 03:06:38 +08:00
|
|
|
} else {
|
|
|
|
// Otherwise do coercion through memory. This is stupid, but
|
|
|
|
// simple.
|
2009-06-05 15:58:54 +08:00
|
|
|
|
|
|
|
// Generally SrcSize is never greater than DstSize, since this means we are
|
|
|
|
// losing bits. However, this can happen in cases where the structure has
|
|
|
|
// additional padding, for example due to a user specified alignment.
|
|
|
|
//
|
|
|
|
// FIXME: Assert that we aren't truncating non-padding bits when have access
|
|
|
|
// to that information.
|
2009-02-03 03:06:38 +08:00
|
|
|
llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
|
|
|
|
CGF.Builder.CreateStore(Src, Tmp);
|
2009-09-09 23:08:12 +08:00
|
|
|
llvm::Value *Casted =
|
2009-02-03 03:06:38 +08:00
|
|
|
CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
|
2009-02-07 10:46:03 +08:00
|
|
|
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
|
|
|
|
// FIXME: Use better alignment / avoid requiring aligned load.
|
|
|
|
Load->setAlignment(1);
|
2009-12-25 04:40:36 +08:00
|
|
|
CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
|
2009-02-03 03:06:38 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-09-17 08:51:38 +08:00
|
|
|
/***/
|
|
|
|
|
2009-02-03 06:03:45 +08:00
|
|
|
bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
|
2009-02-05 16:00:50 +08:00
|
|
|
return FI.getReturnInfo().isIndirect();
|
2009-02-03 05:43:58 +08:00
|
|
|
}
|
|
|
|
|
2010-02-23 08:48:20 +08:00
|
|
|
/// Compute the LLVM function type for a GlobalDecl.
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  if (const FunctionProtoType *FPT =
        FD->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic);
}
|
|
|
|
|
2008-09-10 12:01:49 +08:00
|
|
|
const llvm::FunctionType *
|
2009-02-03 05:43:58 +08:00
|
|
|
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
|
2008-09-10 12:01:49 +08:00
|
|
|
std::vector<const llvm::Type*> ArgTys;
|
|
|
|
|
|
|
|
const llvm::Type *ResultType = 0;
|
|
|
|
|
2009-02-03 07:43:58 +08:00
|
|
|
QualType RetTy = FI.getReturnType();
|
2009-02-03 13:59:18 +08:00
|
|
|
const ABIArgInfo &RetAI = FI.getReturnInfo();
|
2008-09-11 09:48:57 +08:00
|
|
|
switch (RetAI.getKind()) {
|
|
|
|
case ABIArgInfo::Expand:
|
|
|
|
assert(0 && "Invalid ABI kind for return argument");
|
|
|
|
|
2009-06-06 17:36:29 +08:00
|
|
|
case ABIArgInfo::Extend:
|
2009-02-03 14:17:37 +08:00
|
|
|
case ABIArgInfo::Direct:
|
|
|
|
ResultType = ConvertType(RetTy);
|
|
|
|
break;
|
|
|
|
|
2009-02-05 16:00:50 +08:00
|
|
|
case ABIArgInfo::Indirect: {
|
|
|
|
assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
|
2009-08-14 05:57:51 +08:00
|
|
|
ResultType = llvm::Type::getVoidTy(getLLVMContext());
|
2008-09-10 15:00:50 +08:00
|
|
|
const llvm::Type *STy = ConvertType(RetTy);
|
2008-09-10 12:01:49 +08:00
|
|
|
ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-01-27 05:26:08 +08:00
|
|
|
case ABIArgInfo::Ignore:
|
2009-08-14 05:57:51 +08:00
|
|
|
ResultType = llvm::Type::getVoidTy(getLLVMContext());
|
2009-01-27 05:26:08 +08:00
|
|
|
break;
|
|
|
|
|
2008-09-10 12:01:49 +08:00
|
|
|
case ABIArgInfo::Coerce:
|
2008-09-10 15:04:09 +08:00
|
|
|
ResultType = RetAI.getCoerceToType();
|
2008-09-10 12:01:49 +08:00
|
|
|
break;
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
|
|
|
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
|
2009-02-03 13:31:23 +08:00
|
|
|
ie = FI.arg_end(); it != ie; ++it) {
|
|
|
|
const ABIArgInfo &AI = it->info;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-09-11 09:48:57 +08:00
|
|
|
switch (AI.getKind()) {
|
2009-01-27 05:26:08 +08:00
|
|
|
case ABIArgInfo::Ignore:
|
|
|
|
break;
|
|
|
|
|
2008-09-17 08:51:38 +08:00
|
|
|
case ABIArgInfo::Coerce:
|
2009-02-04 03:12:28 +08:00
|
|
|
ArgTys.push_back(AI.getCoerceToType());
|
|
|
|
break;
|
|
|
|
|
2009-02-10 09:51:39 +08:00
|
|
|
case ABIArgInfo::Indirect: {
|
2009-02-05 16:00:50 +08:00
|
|
|
// indirect arguments are always on the stack, which is addr space #0.
|
2009-02-10 09:51:39 +08:00
|
|
|
const llvm::Type *LTy = ConvertTypeForMem(it->type);
|
|
|
|
ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
|
2008-09-11 09:48:57 +08:00
|
|
|
break;
|
2009-02-10 09:51:39 +08:00
|
|
|
}
|
2009-06-06 17:36:29 +08:00
|
|
|
|
|
|
|
case ABIArgInfo::Extend:
|
2009-02-03 14:17:37 +08:00
|
|
|
case ABIArgInfo::Direct:
|
2009-02-05 17:16:39 +08:00
|
|
|
ArgTys.push_back(ConvertType(it->type));
|
2008-09-11 09:48:57 +08:00
|
|
|
break;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-09-11 09:48:57 +08:00
|
|
|
case ABIArgInfo::Expand:
|
2009-02-03 13:31:23 +08:00
|
|
|
GetExpandedTypes(it->type, ArgTys);
|
2008-09-11 09:48:57 +08:00
|
|
|
break;
|
|
|
|
}
|
2008-09-10 12:01:49 +08:00
|
|
|
}
|
|
|
|
|
2009-02-03 05:43:58 +08:00
|
|
|
return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
|
2008-09-10 07:48:28 +08:00
|
|
|
}
|
|
|
|
|
2009-11-24 13:08:52 +08:00
|
|
|
/// Compute the LLVM type to use for a method in a vtable slot.
const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  // If the prototype can't be lowered yet (presumably because some type in
  // it is incomplete — confirm against VerifyFuncTypeComplete's contract),
  // hand back an opaque type that can be refined later.
  if (VerifyFuncTypeComplete(FPT))
    return llvm::OpaqueType::get(getLLVMContext());

  return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
}
|
|
|
|
|
2009-02-03 07:43:58 +08:00
|
|
|
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
|
2009-02-03 06:03:45 +08:00
|
|
|
const Decl *TargetDecl,
|
2009-09-12 08:59:20 +08:00
|
|
|
AttributeListType &PAL,
|
|
|
|
unsigned &CallingConv) {
|
2008-09-10 08:32:18 +08:00
|
|
|
unsigned FuncAttrs = 0;
|
2008-09-27 06:53:57 +08:00
|
|
|
unsigned RetAttrs = 0;
|
2008-09-10 08:32:18 +08:00
|
|
|
|
2009-09-12 08:59:20 +08:00
|
|
|
CallingConv = FI.getEffectiveCallingConvention();
|
|
|
|
|
2010-02-06 05:31:56 +08:00
|
|
|
if (FI.isNoReturn())
|
|
|
|
FuncAttrs |= llvm::Attribute::NoReturn;
|
|
|
|
|
2009-04-04 08:49:24 +08:00
|
|
|
// FIXME: handle sseregparm someday...
|
2008-09-10 08:32:18 +08:00
|
|
|
if (TargetDecl) {
|
2009-06-30 10:34:44 +08:00
|
|
|
if (TargetDecl->hasAttr<NoThrowAttr>())
|
2008-09-26 05:02:23 +08:00
|
|
|
FuncAttrs |= llvm::Attribute::NoUnwind;
|
2009-06-30 10:34:44 +08:00
|
|
|
if (TargetDecl->hasAttr<NoReturnAttr>())
|
2008-09-26 05:02:23 +08:00
|
|
|
FuncAttrs |= llvm::Attribute::NoReturn;
|
2009-06-30 10:34:44 +08:00
|
|
|
if (TargetDecl->hasAttr<ConstAttr>())
|
2008-10-06 07:32:53 +08:00
|
|
|
FuncAttrs |= llvm::Attribute::ReadNone;
|
2009-06-30 10:34:44 +08:00
|
|
|
else if (TargetDecl->hasAttr<PureAttr>())
|
2009-04-11 06:14:52 +08:00
|
|
|
FuncAttrs |= llvm::Attribute::ReadOnly;
|
2009-08-10 04:07:29 +08:00
|
|
|
if (TargetDecl->hasAttr<MallocAttr>())
|
|
|
|
RetAttrs |= llvm::Attribute::NoAlias;
|
2008-09-10 08:32:18 +08:00
|
|
|
}
|
|
|
|
|
2009-11-13 01:24:48 +08:00
|
|
|
if (CodeGenOpts.OptimizeSize)
|
2009-10-28 03:48:08 +08:00
|
|
|
FuncAttrs |= llvm::Attribute::OptimizeForSize;
|
2009-11-13 01:24:48 +08:00
|
|
|
if (CodeGenOpts.DisableRedZone)
|
2009-06-05 07:32:02 +08:00
|
|
|
FuncAttrs |= llvm::Attribute::NoRedZone;
|
2009-11-13 01:24:48 +08:00
|
|
|
if (CodeGenOpts.NoImplicitFloat)
|
2009-06-06 06:05:48 +08:00
|
|
|
FuncAttrs |= llvm::Attribute::NoImplicitFloat;
|
2009-06-05 07:32:02 +08:00
|
|
|
|
2009-02-03 07:43:58 +08:00
|
|
|
QualType RetTy = FI.getReturnType();
|
2008-09-10 08:32:18 +08:00
|
|
|
unsigned Index = 1;
|
2009-02-03 13:59:18 +08:00
|
|
|
const ABIArgInfo &RetAI = FI.getReturnInfo();
|
2008-09-10 12:01:49 +08:00
|
|
|
switch (RetAI.getKind()) {
|
2009-06-06 17:36:29 +08:00
|
|
|
case ABIArgInfo::Extend:
|
|
|
|
if (RetTy->isSignedIntegerType()) {
|
|
|
|
RetAttrs |= llvm::Attribute::SExt;
|
|
|
|
} else if (RetTy->isUnsignedIntegerType()) {
|
|
|
|
RetAttrs |= llvm::Attribute::ZExt;
|
|
|
|
}
|
|
|
|
// FALLTHROUGH
|
2009-02-03 14:17:37 +08:00
|
|
|
case ABIArgInfo::Direct:
|
2008-09-10 10:41:04 +08:00
|
|
|
break;
|
|
|
|
|
2009-02-05 16:00:50 +08:00
|
|
|
case ABIArgInfo::Indirect:
|
2009-09-09 23:08:12 +08:00
|
|
|
PAL.push_back(llvm::AttributeWithIndex::get(Index,
|
2010-04-20 13:44:43 +08:00
|
|
|
llvm::Attribute::StructRet));
|
2008-09-10 08:32:18 +08:00
|
|
|
++Index;
|
2009-03-19 03:51:01 +08:00
|
|
|
// sret disables readnone and readonly
|
|
|
|
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
|
|
|
|
llvm::Attribute::ReadNone);
|
2008-09-10 10:41:04 +08:00
|
|
|
break;
|
|
|
|
|
2009-01-27 05:26:08 +08:00
|
|
|
case ABIArgInfo::Ignore:
|
2008-09-10 10:41:04 +08:00
|
|
|
case ABIArgInfo::Coerce:
|
|
|
|
break;
|
2008-09-11 09:48:57 +08:00
|
|
|
|
|
|
|
case ABIArgInfo::Expand:
|
2009-09-09 23:08:12 +08:00
|
|
|
assert(0 && "Invalid ABI kind for return argument");
|
2008-09-10 08:32:18 +08:00
|
|
|
}
|
2008-09-10 10:41:04 +08:00
|
|
|
|
2008-09-27 06:53:57 +08:00
|
|
|
if (RetAttrs)
|
|
|
|
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
|
2009-04-04 08:49:24 +08:00
|
|
|
|
|
|
|
// FIXME: we need to honour command line settings also...
|
|
|
|
// FIXME: RegParm should be reduced in case of nested functions and/or global
|
|
|
|
// register variable.
|
2010-03-31 06:15:11 +08:00
|
|
|
signed RegParm = FI.getRegParm();
|
2009-04-04 08:49:24 +08:00
|
|
|
|
|
|
|
unsigned PointerWidth = getContext().Target.getPointerWidth(0);
|
2009-09-09 23:08:12 +08:00
|
|
|
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
|
2009-02-03 13:31:23 +08:00
|
|
|
ie = FI.arg_end(); it != ie; ++it) {
|
|
|
|
QualType ParamType = it->type;
|
|
|
|
const ABIArgInfo &AI = it->info;
|
2008-09-26 05:02:23 +08:00
|
|
|
unsigned Attributes = 0;
|
2009-04-04 08:49:24 +08:00
|
|
|
|
2010-03-27 08:47:27 +08:00
|
|
|
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
|
|
|
|
// have the corresponding parameter variable. It doesn't make
|
|
|
|
// sense to do it here because parameters are so fucked up.
|
2009-12-08 02:30:06 +08:00
|
|
|
|
2008-09-11 09:48:57 +08:00
|
|
|
switch (AI.getKind()) {
|
2009-02-04 03:12:28 +08:00
|
|
|
case ABIArgInfo::Coerce:
|
|
|
|
break;
|
|
|
|
|
2009-02-05 16:00:50 +08:00
|
|
|
case ABIArgInfo::Indirect:
|
2009-09-16 23:53:40 +08:00
|
|
|
if (AI.getIndirectByVal())
|
|
|
|
Attributes |= llvm::Attribute::ByVal;
|
|
|
|
|
2009-04-04 08:49:24 +08:00
|
|
|
Attributes |=
|
2009-02-05 16:00:50 +08:00
|
|
|
llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
|
2009-03-19 03:51:01 +08:00
|
|
|
// byval disables readnone and readonly.
|
|
|
|
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
|
|
|
|
llvm::Attribute::ReadNone);
|
2008-09-11 09:48:57 +08:00
|
|
|
break;
|
2009-06-06 17:36:29 +08:00
|
|
|
|
|
|
|
case ABIArgInfo::Extend:
|
|
|
|
if (ParamType->isSignedIntegerType()) {
|
|
|
|
Attributes |= llvm::Attribute::SExt;
|
|
|
|
} else if (ParamType->isUnsignedIntegerType()) {
|
|
|
|
Attributes |= llvm::Attribute::ZExt;
|
|
|
|
}
|
|
|
|
// FALLS THROUGH
|
2009-02-03 14:17:37 +08:00
|
|
|
case ABIArgInfo::Direct:
|
2009-04-04 08:49:24 +08:00
|
|
|
if (RegParm > 0 &&
|
|
|
|
(ParamType->isIntegerType() || ParamType->isPointerType())) {
|
|
|
|
RegParm -=
|
|
|
|
(Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
|
|
|
|
if (RegParm >= 0)
|
|
|
|
Attributes |= llvm::Attribute::InReg;
|
|
|
|
}
|
|
|
|
// FIXME: handle sseregparm someday...
|
2008-09-11 09:48:57 +08:00
|
|
|
break;
|
2009-04-04 08:49:24 +08:00
|
|
|
|
2009-01-27 05:26:08 +08:00
|
|
|
case ABIArgInfo::Ignore:
|
|
|
|
// Skip increment, no matching LLVM parameter.
|
2009-09-09 23:08:12 +08:00
|
|
|
continue;
|
2009-01-27 05:26:08 +08:00
|
|
|
|
2008-09-17 08:51:38 +08:00
|
|
|
case ABIArgInfo::Expand: {
|
2009-09-09 23:08:12 +08:00
|
|
|
std::vector<const llvm::Type*> Tys;
|
2009-05-16 15:57:57 +08:00
|
|
|
// FIXME: This is rather inefficient. Do we ever actually need to do
|
|
|
|
// anything here? The result should be just reconstructed on the other
|
|
|
|
// side, so extension should be a non-issue.
|
2008-09-17 08:51:38 +08:00
|
|
|
getTypes().GetExpandedTypes(ParamType, Tys);
|
|
|
|
Index += Tys.size();
|
|
|
|
continue;
|
|
|
|
}
|
2008-09-10 08:32:18 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-09-26 05:02:23 +08:00
|
|
|
if (Attributes)
|
|
|
|
PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
|
2008-09-17 08:51:38 +08:00
|
|
|
++Index;
|
2008-09-10 08:32:18 +08:00
|
|
|
}
|
2008-09-27 06:53:57 +08:00
|
|
|
if (FuncAttrs)
|
|
|
|
PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
|
2008-09-10 08:32:18 +08:00
|
|
|
}
|
|
|
|
|
2009-02-03 06:03:45 +08:00
|
|
|
/// EmitFunctionProlog - Reconstitute the source-level parameters of the
/// current function from the ABI-lowered LLVM arguments of \p Fn.
///
/// For each formal parameter this inspects its ABIArgInfo kind and emits
/// whatever allocas/loads/stores are needed so the rest of codegen can see
/// the parameter through EmitParmDecl:
///   - Indirect: the LLVM argument is a pointer; scalars are loaded from it.
///   - Extend/Direct: the LLVM argument is the value itself; aggregates get
///     spilled to a temporary so they are addressable.
///   - Expand: the parameter was flattened into several LLVM arguments and
///     is reassembled into a temporary.
///   - Ignore: no LLVM argument exists; a dummy local is synthesized.
///   - Coerce: the LLVM argument has a different type and is stored through
///     a coercion temporary.
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  // When the return is passed via sret, it occupies the first LLVM argument
  // slot, so skip past it before matching formal parameters.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  // Walk the formal parameters and their ABI classifications in lockstep.
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateMemTemp(Ty);
        Builder.CreateStore(AI, V);
      } else {
        // 'restrict' on the parameter maps to the LLVM noalias attribute.
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      // AI has already been advanced past every consumed LLVM argument;
      // skip the shared ++AI below.
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateMemTemp(Ty, "coerce");
      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    // Consume exactly one LLVM argument for the cases that fell through
    // (Indirect, Extend/Direct, Coerce).
    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
|
|
|
|
|
Change IR generation for return (in the simple case) to avoid doing silly
load/store nonsense in the epilog. For example, for:
int foo(int X) {
int A[100];
return A[X];
}
we used to generate:
%arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
%tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
store i32 %tmp1, i32* %retval
%0 = load i32* %retval ; <i32> [#uses=1]
ret i32 %0
}
which codegen'd to this code:
_foo: ## @foo
## BB#0: ## %entry
subq $408, %rsp ## imm = 0x198
movl %edi, 400(%rsp)
movl 400(%rsp), %edi
movslq %edi, %rax
movl (%rsp,%rax,4), %edi
movl %edi, 404(%rsp)
movl 404(%rsp), %eax
addq $408, %rsp ## imm = 0x198
ret
Now we generate:
%arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
%tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
ret i32 %tmp1
}
and:
_foo: ## @foo
## BB#0: ## %entry
subq $408, %rsp ## imm = 0x198
movl %edi, 404(%rsp)
movl 404(%rsp), %edi
movslq %edi, %rax
movl (%rsp,%rax,4), %eax
addq $408, %rsp ## imm = 0x198
ret
This actually does matter, cutting out 2000 lines of IR from CGStmt.ll
for example.
Another interesting effect is that altivec.h functions which are dead
now get dce'd by the inliner. Hence all the changes to
builtins-ppc-altivec.c to ensure the calls aren't dead.
llvm-svn: 106970
2010-06-27 09:06:27 +08:00
|
|
|
/// EmitFunctionEpilog - Emit the return sequence for the current function,
/// transferring the value accumulated in ReturnValue (if any) out according
/// to the return ABIArgInfo kind: copied through the sret pointer for
/// Indirect returns, loaded and returned directly for Direct/Extend, loaded
/// via type coercion for Coerce, and dropped for Ignore.
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    // The caller supplied a destination as the first LLVM argument (sret);
    // copy the computed return value into it.
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregrates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, RetTy);
    }
    break;

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    // The internal return value temp always will have pointer-to-return-type
    // type, just do a load.

    // If the instruction right before the insertion point is a store to the
    // return value, we can elide the load, zap the store, and usually zap the
    // alloca.
    llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
    llvm::StoreInst *SI = 0;
    if (InsertBB->empty() ||
        !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
        SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
      RV = Builder.CreateLoad(ReturnValue);
    } else {
      // Get the stored value and nuke the now-dead store.
      RV = SI->getValueOperand();
      SI->eraseFromParent();

      // If that was the only use of the return value, nuke it as well now.
      if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
        cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
        ReturnValue = 0;
      }
    }
    break;
  }
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Coerce:
    // Reinterpret the return slot as the ABI-mandated type.
    RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  // RV is null for void-like returns (Indirect, Ignore, elided cases).
  if (RV)
    Builder.CreateRet(RV);
  else
    Builder.CreateRetVoid();
}
|
|
|
|
|
2010-05-27 06:34:26 +08:00
|
|
|
/// EmitDelegateCallArg - Turn a parameter that StartFunction lowered into a
/// local alloca back into an RValue suitable for re-passing via EmitCall.
RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca. We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *Addr = GetAddrOfLocalVar(Param);
  QualType ParamTy = Param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *RT = ParamTy->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(RT->getPointeeType()))
      return RValue::getAggregate(Addr);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return RValue::get(Builder.CreateLoad(Addr));
  }

  if (ParamTy->isAnyComplexType())
    return RValue::getComplex(LoadComplexFromAddr(Addr, /*volatile*/ false));

  if (hasAggregateLLVMType(ParamTy))
    return RValue::getAggregate(Addr);

  return RValue::get(EmitLoadOfScalar(Addr, false, ParamTy));
}
|
|
|
|
|
2009-04-09 04:47:54 +08:00
|
|
|
/// EmitCallArg - Evaluate a single call argument expression, binding
/// reference-typed arguments and materializing everything else into a
/// temporary as needed.
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  // Non-reference arguments are simply evaluated into a temporary.
  if (!ArgType->isReferenceType())
    return EmitAnyExprToTemp(E);

  // Reference arguments bind directly to the initializing expression.
  return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
}
|
|
|
|
|
2009-02-03 06:03:45 +08:00
|
|
|
/// EmitCall - Emit a call (or invoke, when inside an EH scope) to \p Callee
/// with the given arguments, marshalling each argument and the return value
/// according to the ABI classification in \p CallInfo, and return the call
/// result as an RValue.
///
/// \param ReturnValue  optional caller-provided slot for aggregate returns.
/// \param TargetDecl   the declaration being called, if known; used when
///                     constructing the call's attribute list.
/// \param callOrInvoke if non-null, receives the emitted call/invoke
///                     instruction.
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    // The sret pointer is always the first LLVM argument.
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  // Marshal each argument per its ABI classification.
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        // Aggregates are already in memory; pass their address directly.
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        // Pass a complex value as a two-element first-class aggregate.
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        // Aggregate passed directly: load the whole value from memory.
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      // Flatten the aggregate into multiple scalar arguments.
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  // Use an invoke only when we are inside an EH scope and the callee may
  // unwind; otherwise a plain call suffices.
  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke) {
    *callOrInvoke = CS.getInstruction();
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Convert the call result back to an RValue per the return classification.
  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    // The result was written through the sret pointer (Args[0]).
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
        DestIsVolatile = false;
      }
      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
      return RValue::getAggregate(DestPtr);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // Spill the ABI-typed result to memory, then reload it as the
    // source-level type.
    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
|
2009-02-11 04:44:09 +08:00
|
|
|
|
|
|
|
/* VarArg handling */
|
|
|
|
|
|
|
|
/// EmitVAArg - Lower a va_arg read of type \p Ty from the va_list at
/// \p VAListAddr by delegating to the target's ABIInfo implementation.
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  const ABIInfo &ABI = CGM.getTypes().getABIInfo();
  return ABI.EmitVAArg(VAListAddr, Ty, *this);
}
|