Fix the required args count for variadic blocks.
We were emitting calls to blocks as if all arguments were required, i.e. with signature (A,B,C,D,...) rather than (A,B,...). This patch fixes that and accounts for the implicit block-context argument as a required argument.

In addition, this patch changes the function type under which we call unprototyped functions on platforms like x86-64 that guarantee compatibility of variadic functions with unprototyped function types. Previously we would always call such functions under the LLVM type T (...)*, but now we will call them under the type T (A,B,C,D,...)*. This last change should have no material effect except for making the type conventions more explicit; it was a side effect of the most convenient implementation.

llvm-svn: 169588
commit c818bbb8b2 (parent 3f2f792673)
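To illustrate the block-call half of the change, here is a rough sketch in C, modeled on the test3 case added below. The function name is invented for illustration, and the commented signatures are approximate callee-type shapes rather than verbatim compiler output.

// A variadic block: one declared int parameter plus a trailing "...".
// Before this patch, the call below was emitted with every actual argument
// listed as required, i.e. under a callee type shaped roughly like
//   void (i8* block_context, i32, i32, i32, i32, ...)
// With the patch, only the required prefix (the implicit block-context
// pointer plus the declared i32) precedes the "...", giving
//   void (i8* block_context, i32, ...)
void call_variadic_block(void (^block)(int, ...)) {
  block(0, 1, 2, 3);
}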
@@ -905,7 +905,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
   const FunctionType *FuncTy = FnType->castAs<FunctionType>();
   const CGFunctionInfo &FnInfo =
-    CGM.getTypes().arrangeFreeFunctionCall(Args, FuncTy);
+    CGM.getTypes().arrangeBlockFunctionCall(Args, FuncTy);
 
   // Cast the function pointer to the right type.
   llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);

@@ -316,6 +316,37 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
   return arrangeFunctionDeclaration(FD);
 }
 
+/// Arrange a call as unto a free function, except possibly with an
+/// additional number of formal parameters considered required.
+static const CGFunctionInfo &
+arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
+                            const CallArgList &args,
+                            const FunctionType *fnType,
+                            unsigned numExtraRequiredArgs) {
+  assert(args.size() >= numExtraRequiredArgs);
+
+  // In most cases, there are no optional arguments.
+  RequiredArgs required = RequiredArgs::All;
+
+  // If we have a variadic prototype, the required arguments are the
+  // extra prefix plus the arguments in the prototype.
+  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
+    if (proto->isVariadic())
+      required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);
+
+  // If we don't have a prototype at all, but we're supposed to
+  // explicitly use the variadic convention for unprototyped calls,
+  // treat all of the arguments as required but preserve the nominal
+  // possibility of variadics.
+  } else if (CGT.CGM.getTargetCodeGenInfo()
+               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
+    required = RequiredArgs(args.size());
+  }
+
+  return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
+                                     fnType->getExtInfo(), required);
+}
+
 /// Figure out the rules for calling a function with the given formal
 /// type using the given arguments.  The arguments are necessary
 /// because the function might be unprototyped, in which case it's

@@ -323,17 +354,15 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
 const CGFunctionInfo &
 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
-  RequiredArgs required = RequiredArgs::All;
-  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
-    if (proto->isVariadic())
-      required = RequiredArgs(proto->getNumArgs());
-  } else if (CGM.getTargetCodeGenInfo()
-               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
-    required = RequiredArgs(0);
-  }
+  return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
+}
 
-  return arrangeFreeFunctionCall(fnType->getResultType(), args,
-                                 fnType->getExtInfo(), required);
+/// A block function call is essentially a free-function call with an
+/// extra implicit argument.
+const CGFunctionInfo &
+CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
+                                       const FunctionType *fnType) {
+  return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
 }
 
 const CGFunctionInfo &

@@ -865,8 +894,14 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
     break;
   }
 
-  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
-         ie = FI.arg_end(); it != ie; ++it) {
+  // Add in all of the required arguments.
+  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
+  if (FI.isVariadic()) {
+    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
+  } else {
+    ie = FI.arg_end();
+  }
+  for (; it != ie; ++it) {
     const ABIArgInfo &argAI = it->info;
 
     // Insert a padding type to ensure proper alignment.

@@ -134,7 +134,7 @@ namespace CodeGen {
   }
 
   bool allowsOptionalArgs() const { return NumRequired != ~0U; }
-  bool getNumRequiredArgs() const {
+  unsigned getNumRequiredArgs() const {
     assert(allowsOptionalArgs());
     return NumRequired;
   }

@@ -2930,7 +2930,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
   // through an unprototyped function type works like a *non-variadic*
   // call.  The way we make this work is to cast to the exact type
   // of the promoted arguments.
-  if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
+  if (isa<FunctionNoProtoType>(FnType)) {
     llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
     CalleeTy = CalleeTy->getPointerTo();
     Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");

@@ -58,6 +58,7 @@ namespace CodeGen {
 /// CodeGenTypes - This class organizes the cross-module state that is used
 /// while lowering AST types to LLVM types.
 class CodeGenTypes {
+public:
   // Some of this stuff should probably be left on the CGM.
   ASTContext &Context;
   const TargetInfo &Target;

@@ -68,6 +69,7 @@ class CodeGenTypes {
   const CodeGenOptions &CodeGenOpts;
   CodeGenModule &CGM;
 
+private:
   /// The opaque type map for Objective-C interfaces.  All direct
   /// manipulation is done by the runtime interfaces, which are
   /// responsible for coercing to the appropriate type; these opaque

@@ -195,6 +197,8 @@ public:
                                                 const CallArgList &args,
                                                 FunctionType::ExtInfo info,
                                                 RequiredArgs required);
+  const CGFunctionInfo &arrangeBlockFunctionCall(const CallArgList &args,
+                                                 const FunctionType *type);
 
   const CGFunctionInfo &arrangeCXXMethodCall(const CallArgList &args,
                                              const FunctionProtoType *type,

@@ -158,10 +158,13 @@ namespace clang {
   /// - the conventions are substantively different in how they pass
   ///   arguments, because in this case using the variadic convention
   ///   will lead to C99 violations.
-  /// It is not necessarily correct when arguments are passed in the
-  /// same way and some out-of-band information is passed for the
-  /// benefit of variadic callees, as is the case for x86-64.
-  /// In this case the ABI should be consulted.
+  ///
+  /// However, some platforms make the conventions identical except
+  /// for passing additional out-of-band information to a variadic
+  /// function: for example, x86-64 passes the number of SSE
+  /// arguments in %al.  On these platforms, it is desireable to
+  /// call unprototyped functions using the variadic convention so
+  /// that unprototyped calls to varargs functions still succeed.
   virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
                                      const FunctionNoProtoType *fnType) const;
 };

@@ -374,3 +374,21 @@ T1 test48(void) {
   // CHECK: memcpy
   return T1_retval;
 }
+
+void test49_helper(double, ...);
+void test49(double d, double e) {
+  test49_helper(d, e);
+}
+// CHECK:    define void @test49(
+// CHECK:      [[T0:%.*]] = load double*
+// CHECK-NEXT: [[T1:%.*]] = load double*
+// CHECK-NEXT: call void (double, ...)* @test49_helper(double [[T0]], double [[T1]])
+
+void test50_helper();
+void test50(double d, double e) {
+  test50_helper(d, e);
+}
+// CHECK:    define void @test50(
+// CHECK:      [[T0:%.*]] = load double*
+// CHECK-NEXT: [[T1:%.*]] = load double*
+// CHECK-NEXT: call void (double, double, ...)* bitcast (void (...)* @test50_helper to void (double, double, ...)*)(double [[T0]], double [[T1]])

@@ -100,3 +100,35 @@ void test2(Test2 *x) {
 // CHECK-NEXT: [[T4:%.*]] = load [[WEAK_T]]{{.*}}** [[T3]]
 // CHECK-NEXT: [[WEAKX:%.*]] = getelementptr inbounds [[WEAK_T]]{{.*}}* [[T4]], i32 0, i32 6
 // CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]** [[WEAKX]], align 4
+
+// rdar://problem/12722954
+// Make sure that ... is appropriately positioned in a block call.
+void test3(void (^block)(int, ...)) {
+  block(0, 1, 2, 3);
+}
+// CHECK:    define void @test3(
+// CHECK:      [[BLOCK:%.*]] = alloca void (i32, ...)*, align 4
+// CHECK-NEXT: store void (i32, ...)*
+// CHECK-NEXT: [[T0:%.*]] = load void (i32, ...)** [[BLOCK]], align 4
+// CHECK-NEXT: [[T1:%.*]] = bitcast void (i32, ...)* [[T0]] to [[BLOCK_T:%.*]]*
+// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[T1]], i32 0, i32 3
+// CHECK-NEXT: [[T3:%.*]] = bitcast [[BLOCK_T]]* [[T1]] to i8*
+// CHECK-NEXT: [[T4:%.*]] = load i8** [[T2]]
+// CHECK-NEXT: [[T5:%.*]] = bitcast i8* [[T4]] to void (i8*, i32, ...)*
+// CHECK-NEXT: call void (i8*, i32, ...)* [[T5]](i8* [[T3]], i32 0, i32 1, i32 2, i32 3)
+// CHECK-NEXT: ret void
+
+void test4(void (^block)()) {
+  block(0, 1, 2, 3);
+}
+// CHECK:    define void @test4(
+// CHECK:      [[BLOCK:%.*]] = alloca void (...)*, align 4
+// CHECK-NEXT: store void (...)*
+// CHECK-NEXT: [[T0:%.*]] = load void (...)** [[BLOCK]], align 4
+// CHECK-NEXT: [[T1:%.*]] = bitcast void (...)* [[T0]] to [[BLOCK_T:%.*]]*
+// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[T1]], i32 0, i32 3
+// CHECK-NEXT: [[T3:%.*]] = bitcast [[BLOCK_T]]* [[T1]] to i8*
+// CHECK-NEXT: [[T4:%.*]] = load i8** [[T2]]
+// CHECK-NEXT: [[T5:%.*]] = bitcast i8* [[T4]] to void (i8*, i32, i32, i32, i32)*
+// CHECK-NEXT: call void [[T5]](i8* [[T3]], i32 0, i32 1, i32 2, i32 3)
+// CHECK-NEXT: ret void