2014-09-03 23:27:03 +08:00
|
|
|
//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
/// \file
|
|
|
|
/// \brief This file implements semantic analysis for CUDA constructs.
|
|
|
|
///
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "clang/AST/ASTContext.h"
|
|
|
|
#include "clang/AST/Decl.h"
|
2016-02-03 06:29:48 +08:00
|
|
|
#include "clang/AST/ExprCXX.h"
|
2014-12-04 05:53:36 +08:00
|
|
|
#include "clang/Lex/Preprocessor.h"
|
2016-03-31 07:30:21 +08:00
|
|
|
#include "clang/Sema/Lookup.h"
|
|
|
|
#include "clang/Sema/Sema.h"
|
2014-09-03 23:27:03 +08:00
|
|
|
#include "clang/Sema/SemaDiagnostic.h"
|
2016-10-14 02:45:08 +08:00
|
|
|
#include "clang/Sema/SemaInternal.h"
|
2016-03-31 07:30:21 +08:00
|
|
|
#include "clang/Sema/Template.h"
|
2014-09-30 04:38:29 +08:00
|
|
|
#include "llvm/ADT/Optional.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2014-09-03 23:27:03 +08:00
|
|
|
using namespace clang;
|
|
|
|
|
2016-10-09 06:15:58 +08:00
|
|
|
/// Enter a region (opened by #pragma clang force_cuda_host_device_begin)
/// in which unattributed functions are treated as __host__ __device__.
void Sema::PushForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  ++ForceCUDAHostDeviceDepth;
}
|
|
|
|
|
|
|
|
/// Leave the innermost force-host-device region.
/// Returns false when there is no matching Push (unbalanced pragma).
bool Sema::PopForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (ForceCUDAHostDeviceDepth == 0)
    return false;
  --ForceCUDAHostDeviceDepth;
  return true;
}
|
|
|
|
|
2014-09-03 23:27:03 +08:00
|
|
|
/// Build the AST for a kernel launch's execution configuration.
/// \param LLLLoc location of the opening "<<<"; \param GGGLoc location of the
/// closing ">>>"; \param ExecConfig the arguments between them.
/// The configuration is modeled as a call to cudaConfigureCall, whose
/// declaration is recorded from the CUDA headers in the ASTContext.
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  // Without the header-provided declaration there is nothing to call against.
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << "cudaConfigureCall");
  QualType ConfigQTy = ConfigDecl->getType();

  // Build a reference to the configuration function and mark it referenced so
  // later phases see the use.
  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  // Lower the configuration to an ordinary call; IsExecConfig=true tells the
  // call machinery this call originated from <<<...>>>.
  return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}
|
|
|
|
|
2016-12-08 03:27:16 +08:00
|
|
|
/// Determine the CUDA target from a parsed attribute list (used before a
/// declaration exists). Precedence: invalid > __global__ > __host__ __device__
/// > __device__ > __host__ (the default when nothing is present).
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const AttributeList *Attr) {
  bool SawHost = false;
  bool SawDevice = false;
  bool SawGlobal = false;
  bool SawInvalid = false;
  // Scan every attribute in the chain, noting each CUDA target marker.
  for (; Attr; Attr = Attr->getNext()) {
    switch (Attr->getKind()) {
    case AttributeList::AT_CUDAGlobal:
      SawGlobal = true;
      break;
    case AttributeList::AT_CUDAHost:
      SawHost = true;
      break;
    case AttributeList::AT_CUDADevice:
      SawDevice = true;
      break;
    case AttributeList::AT_CUDAInvalidTarget:
      SawInvalid = true;
      break;
    default:
      break;
    }
  }

  if (SawInvalid)
    return CFT_InvalidTarget;
  if (SawGlobal)
    return CFT_Global;
  if (SawHost && SawDevice)
    return CFT_HostDevice;
  if (SawDevice)
    return CFT_Device;
  return CFT_Host;
}
|
|
|
|
|
2016-12-09 03:38:13 +08:00
|
|
|
template <typename A>
|
|
|
|
static bool hasAttr(const FunctionDecl *D, bool IgnoreImplicitAttr) {
|
|
|
|
return D->hasAttrs() && llvm::any_of(D->getAttrs(), [&](Attr *Attribute) {
|
|
|
|
return isa<A>(Attribute) &&
|
|
|
|
!(IgnoreImplicitAttr && Attribute->isImplicit());
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2014-09-03 23:27:03 +08:00
|
|
|
/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
|
2016-12-09 03:38:13 +08:00
|
|
|
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
|
|
|
|
bool IgnoreImplicitHDAttr) {
|
2016-10-14 02:45:08 +08:00
|
|
|
// Code that lives outside a function is run on the host.
|
|
|
|
if (D == nullptr)
|
|
|
|
return CFT_Host;
|
|
|
|
|
2014-09-30 04:38:29 +08:00
|
|
|
if (D->hasAttr<CUDAInvalidTargetAttr>())
|
|
|
|
return CFT_InvalidTarget;
|
2014-09-03 23:27:03 +08:00
|
|
|
|
|
|
|
if (D->hasAttr<CUDAGlobalAttr>())
|
|
|
|
return CFT_Global;
|
|
|
|
|
2016-12-09 03:38:13 +08:00
|
|
|
if (hasAttr<CUDADeviceAttr>(D, IgnoreImplicitHDAttr)) {
|
|
|
|
if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr))
|
2014-09-03 23:27:03 +08:00
|
|
|
return CFT_HostDevice;
|
|
|
|
return CFT_Device;
|
2016-12-09 03:38:13 +08:00
|
|
|
} else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
|
2014-10-01 01:38:34 +08:00
|
|
|
return CFT_Host;
|
2016-12-09 03:38:13 +08:00
|
|
|
} else if (D->isImplicit() && !IgnoreImplicitHDAttr) {
|
2014-10-01 01:38:34 +08:00
|
|
|
// Some implicit declarations (like intrinsic functions) are not marked.
|
|
|
|
// Set the most lenient target on them for maximal flexibility.
|
|
|
|
return CFT_HostDevice;
|
2014-09-03 23:27:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return CFT_Host;
|
|
|
|
}
|
|
|
|
|
2015-09-23 01:22:59 +08:00
|
|
|
// * CUDA Call preference table
|
|
|
|
//
|
|
|
|
// F - from,
|
|
|
|
// T - to
|
|
|
|
// Ph - preference in host mode
|
|
|
|
// Pd - preference in device mode
|
|
|
|
// H - handled in (x)
|
2016-03-30 00:24:22 +08:00
|
|
|
// Preferences: N:native, SS:same side, HD:host-device, WS:wrong side, --:never.
|
2015-09-23 01:22:59 +08:00
|
|
|
//
|
2016-02-13 02:29:18 +08:00
|
|
|
// | F | T | Ph | Pd | H |
|
|
|
|
// |----+----+-----+-----+-----+
|
|
|
|
// | d | d | N | N | (c) |
|
|
|
|
// | d | g | -- | -- | (a) |
|
|
|
|
// | d | h | -- | -- | (e) |
|
|
|
|
// | d | hd | HD | HD | (b) |
|
|
|
|
// | g | d | N | N | (c) |
|
|
|
|
// | g | g | -- | -- | (a) |
|
|
|
|
// | g | h | -- | -- | (e) |
|
|
|
|
// | g | hd | HD | HD | (b) |
|
|
|
|
// | h | d | -- | -- | (e) |
|
|
|
|
// | h | g | N | N | (c) |
|
|
|
|
// | h | h | N | N | (c) |
|
|
|
|
// | h | hd | HD | HD | (b) |
|
|
|
|
// | hd | d | WS | SS | (d) |
|
|
|
|
// | hd | g | SS | -- |(d/a)|
|
|
|
|
// | hd | h | SS | WS | (d) |
|
|
|
|
// | hd | hd | HD | HD | (b) |
|
2015-09-23 01:22:59 +08:00
|
|
|
|
|
|
|
/// Rank how acceptable a call from \p Caller to \p Callee is, per the
/// preference table above. The cases are checked in the order (invalid),
/// (a), (b), (c), (d), (e); that order is significant.
Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
                             const FunctionDecl *Callee) {
  assert(Callee && "Callee must be valid.");
  CUDAFunctionTarget CallerT = IdentifyCUDATarget(Caller);
  CUDAFunctionTarget CalleeT = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerT == CFT_InvalidTarget || CalleeT == CFT_InvalidTarget)
    return CFP_Never;

  // (a) Can't call global from some contexts until we support CUDA's
  // dynamic parallelism.
  if (CalleeT == CFT_Global &&
      (CallerT == CFT_Global || CallerT == CFT_Device))
    return CFP_Never;

  // (b) Calling HostDevice is OK for everyone.
  if (CalleeT == CFT_HostDevice)
    return CFP_HostDevice;

  // (c) Best case scenarios: same target, host launching a kernel, or a
  // kernel calling device code.
  bool NativeCall = CalleeT == CallerT ||
                    (CallerT == CFT_Host && CalleeT == CFT_Global) ||
                    (CallerT == CFT_Global && CalleeT == CFT_Device);
  if (NativeCall)
    return CFP_Native;

  // (d) HostDevice behavior depends on compilation mode.
  if (CallerT == CFT_HostDevice) {
    // It's OK to call a compilation-mode matching function from an HD one.
    bool CalleeMatchesMode =
        getLangOpts().CUDAIsDevice
            ? CalleeT == CFT_Device
            : (CalleeT == CFT_Host || CalleeT == CFT_Global);
    if (CalleeMatchesMode)
      return CFP_SameSide;

    // Calls from HD to non-mode-matching functions (i.e., to host functions
    // when compiling in device mode or to device functions when compiling in
    // host mode) are allowed at the sema level, but eventually rejected if
    // they're ever codegened. TODO: Reject said calls earlier.
    return CFP_WrongSide;
  }

  // (e) Calling across device/host boundary is not something you should do.
  if ((CallerT == CFT_Host && CalleeT == CFT_Device) ||
      (CallerT == CFT_Device && CalleeT == CFT_Host) ||
      (CallerT == CFT_Global && CalleeT == CFT_Host))
    return CFP_Never;

  llvm_unreachable("All cases should've been handled by now.");
}
|
|
|
|
|
2016-10-11 08:21:10 +08:00
|
|
|
void Sema::EraseUnwantedCUDAMatches(
|
|
|
|
const FunctionDecl *Caller,
|
|
|
|
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
|
2015-09-23 01:22:59 +08:00
|
|
|
if (Matches.size() <= 1)
|
|
|
|
return;
|
|
|
|
|
2016-10-11 08:21:10 +08:00
|
|
|
using Pair = std::pair<DeclAccessPair, FunctionDecl*>;
|
|
|
|
|
2016-03-22 08:09:25 +08:00
|
|
|
// Gets the CUDA function preference for a call from Caller to Match.
|
2016-10-11 08:21:10 +08:00
|
|
|
auto GetCFP = [&](const Pair &Match) {
|
|
|
|
return IdentifyCUDAPreference(Caller, Match.second);
|
2016-03-22 08:09:25 +08:00
|
|
|
};
|
|
|
|
|
2015-09-23 01:22:59 +08:00
|
|
|
// Find the best call preference among the functions in Matches.
|
2016-10-11 08:21:10 +08:00
|
|
|
CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
|
2016-03-22 08:09:25 +08:00
|
|
|
Matches.begin(), Matches.end(),
|
2016-10-11 08:21:10 +08:00
|
|
|
[&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));
|
2015-09-23 01:22:59 +08:00
|
|
|
|
|
|
|
// Erase all functions with lower priority.
|
2017-01-05 03:16:29 +08:00
|
|
|
llvm::erase_if(Matches,
|
|
|
|
[&](const Pair &Match) { return GetCFP(Match) < BestCFP; });
|
2015-09-23 01:22:59 +08:00
|
|
|
}
|
|
|
|
|
2014-09-30 04:38:29 +08:00
|
|
|
/// When an implicitly-declared special member has to invoke more than one
|
|
|
|
/// base/field special member, conflicts may occur in the targets of these
|
|
|
|
/// members. For example, if one base's member __host__ and another's is
|
|
|
|
/// __device__, it's a conflict.
|
|
|
|
/// This function figures out if the given targets \param Target1 and
|
|
|
|
/// \param Target2 conflict, and if they do not it fills in
|
|
|
|
/// \param ResolvedTarget with a target that resolves for both calls.
|
|
|
|
/// \return true if there's a conflict, false otherwise.
|
|
|
|
static bool
|
|
|
|
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
|
|
|
|
Sema::CUDAFunctionTarget Target2,
|
|
|
|
Sema::CUDAFunctionTarget *ResolvedTarget) {
|
2016-01-20 08:26:57 +08:00
|
|
|
// Only free functions and static member functions may be global.
|
|
|
|
assert(Target1 != Sema::CFT_Global);
|
|
|
|
assert(Target2 != Sema::CFT_Global);
|
2014-09-30 04:38:29 +08:00
|
|
|
|
|
|
|
if (Target1 == Sema::CFT_HostDevice) {
|
|
|
|
*ResolvedTarget = Target2;
|
|
|
|
} else if (Target2 == Sema::CFT_HostDevice) {
|
|
|
|
*ResolvedTarget = Target1;
|
|
|
|
} else if (Target1 != Target2) {
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
*ResolvedTarget = Target1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Infer the CUDA target of the implicitly-declared special member
/// \p MemberDecl of \p ClassDecl from the targets of the base/field special
/// members it must invoke, attaching implicit CUDA attributes on success.
/// \return true if the callees' targets conflict (the member is then marked
/// CUDAInvalidTargetAttr), false otherwise.
bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  // Target inferred so far; stays empty until the first callee is seen.
  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member base on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  // Virtual bases are only constructed/destroyed by the most-derived class,
  // which an abstract class can never be.
  if (!ClassDecl->isAbstract()) {
    for (const auto &VB : ClassDecl->vbases()) {
      Bases.push_back(&VB);
    }
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    // No corresponding special member in this base: nothing to call.
    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = BaseMethodTarget;
    } else {
      // Merge the new callee's target into what we have inferred so far.
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
        }
        // Poison the member so later uses of it are rejected.
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    // Strip array dimensions so arrays-of-class-type fields are handled too.
    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue()
              << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Materialize the inferred target as implicit attributes on the member.
  if (InferredTarget.hasValue()) {
    if (InferredTarget.getValue() == CFT_Device) {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    } else if (InferredTarget.getValue() == CFT_Host) {
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    } else {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    }
  } else {
    // If no target was inferred, mark this member as __host__ __device__;
    // it's the least restrictive option that can be invoked from any target.
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
  }

  return false;
}
|
2016-02-03 06:29:48 +08:00
|
|
|
|
|
|
|
/// Is \p CD an "empty" constructor in the sense of CUDA 7.5 E.2.3.1 (and thus
/// allowed for device-side global initialization)?
bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
  // Force instantiation of template constructors so the body checks below see
  // a definition when one is available.
  if (!CD->isDefined() && CD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, CD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A constructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial constructor
  if (CD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The constructor function has been defined.
  // The constructor function has no parameters,
  // and the function body is an empty compound statement.
  if (!(CD->hasTrivialBody() && CD->getNumParams() == 0))
    return false;

  // Its class has no virtual functions and no virtual base classes.
  if (CD->getParent()->isDynamicClass())
    return false;

  // The only form of initializer allowed is an empty constructor.
  // This will recursively check all base classes and member initializers
  if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
        if (const CXXConstructExpr *CE =
                dyn_cast<CXXConstructExpr>(CI->getInit()))
          return isEmptyCudaConstructor(Loc, CE->getConstructor());
        // Any non-constructor initializer (e.g. a scalar member init)
        // disqualifies the constructor.
        return false;
      }))
    return false;

  return true;
}
|
2016-03-31 07:30:21 +08:00
|
|
|
|
2016-05-20 04:13:53 +08:00
|
|
|
/// Is \p DD an "empty" destructor in the sense of CUDA 7.5 E.2.3.1?
/// A null \p DD (no destructor at all) trivially qualifies.
bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
  // No destructor -> no problem.
  if (!DD)
    return true;

  // Force instantiation of template destructors so the body check below sees
  // a definition when one is available.
  if (!DD->isDefined() && DD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, DD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A destructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial destructor
  if (DD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The destructor function has been defined.
  // and the function body is an empty compound statement.
  if (!DD->hasTrivialBody())
    return false;

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Its class has no virtual functions and no virtual base classes.
  if (ClassDecl->isDynamicClass())
    return false;

  // Only empty destructors are allowed. This will recursively check
  // destructors for all base classes...
  if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
        if (CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        // A non-class base has no destructor to worry about.
        return true;
      }))
    return false;

  // ... and member fields.
  if (!llvm::all_of(ClassDecl->fields(), [&](const FieldDecl *Field) {
        if (CXXRecordDecl *RD = Field->getType()
                                    ->getBaseElementTypeUnsafe()
                                    ->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  return true;
}
|
|
|
|
|
2016-03-31 07:30:21 +08:00
|
|
|
// With -fcuda-host-device-constexpr, an unattributed constexpr function is
|
|
|
|
// treated as implicitly __host__ __device__, unless:
|
|
|
|
// * it is a variadic function (device-side variadic functions are not
|
|
|
|
// allowed), or
|
|
|
|
// * a __device__ function with this signature was already declared, in which
// case we output an error, unless the __device__ decl is in a
|
|
|
|
// system header, in which case we leave the constexpr function unattributed.
|
2016-10-09 06:15:58 +08:00
|
|
|
//
|
|
|
|
// In addition, all function decls are treated as __host__ __device__ when
|
|
|
|
// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
|
|
|
|
// #pragma clang force_cuda_host_device_begin/end
|
|
|
|
// pair).
|
2016-10-22 01:15:46 +08:00
|
|
|
/// Possibly attach implicit __host__ __device__ attributes to the
/// newly-declared function \p NewD, per the rules in the comment above.
/// \param Previous prior declarations with the same name, searched for a
/// conflicting __device__ overload.
void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
                                       const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  // Inside a #pragma clang force_cuda_host_device region, every function gets
  // host+device, regardless of constexpr-ness.
  if (ForceCUDAHostDeviceDepth > 0) {
    if (!NewD->hasAttr<CUDAHostAttr>())
      NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
    if (!NewD->hasAttr<CUDADeviceAttr>())
      NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    return;
  }

  // Otherwise only non-variadic constexpr functions with no explicit CUDA
  // target attribute are eligible, and only under
  // -fcuda-host-device-constexpr.
  if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
      NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
      NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
    return;

  // Is D a __device__ function with the same signature as NewD, ignoring CUDA
  // attributes?
  auto IsMatchingDeviceFn = [&](NamedDecl *D) {
    // Look through using-declarations to the underlying function.
    if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(D))
      D = Using->getTargetDecl();
    FunctionDecl *OldD = D->getAsFunction();
    return OldD && OldD->hasAttr<CUDADeviceAttr>() &&
           !OldD->hasAttr<CUDAHostAttr>() &&
           !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false,
                       /* ConsiderCudaAttrs = */ false);
  };
  auto It = llvm::find_if(Previous, IsMatchingDeviceFn);
  if (It != Previous.end()) {
    // We found a __device__ function with the same name and signature as NewD
    // (ignoring CUDA attrs). This is an error unless that function is defined
    // in a system header, in which case we simply return without making NewD
    // host+device.
    NamedDecl *Match = *It;
    if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
      Diag(NewD->getLocation(),
           diag::err_cuda_unattributed_constexpr_cannot_overload_device)
          << NewD->getName();
      Diag(Match->getLocation(),
           diag::note_cuda_conflicting_device_function_declared_here);
    }
    return;
  }

  // No conflict: treat the constexpr function as implicitly host+device.
  NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
  NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
|
2016-08-16 07:00:49 +08:00
|
|
|
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
// In CUDA, there are some constructs which may appear in semantically-valid
|
|
|
|
// code, but trigger errors if we ever generate code for the function in which
|
|
|
|
// they appear. Essentially every construct you're not allowed to use on the
|
|
|
|
// device falls into this category, because you are allowed to use these
|
|
|
|
// constructs in a __host__ __device__ function, but only if that function is
|
|
|
|
// never codegen'ed on the device.
|
|
|
|
//
|
|
|
|
// To handle semantic checking for these constructs, we keep track of the set of
|
|
|
|
// functions we know will be emitted, either because we could tell a priori that
|
|
|
|
// they would be emitted, or because they were transitively called by a
|
|
|
|
// known-emitted function.
|
|
|
|
//
|
|
|
|
// We also keep a partial call graph of which not-known-emitted functions call
|
|
|
|
// which other not-known-emitted functions.
|
|
|
|
//
|
|
|
|
// When we see something which is illegal if the current function is emitted
|
|
|
|
// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
|
|
|
|
// CheckCUDACall), we first check if the current function is known-emitted. If
|
|
|
|
// so, we immediately output the diagnostic.
|
|
|
|
//
|
|
|
|
// Otherwise, we "defer" the diagnostic. It sits in Sema::CUDADeferredDiags
|
|
|
|
// until we discover that the function is known-emitted, at which point we take
|
|
|
|
// it out of this map and emit the diagnostic.
|
|
|
|
|
2016-10-20 05:15:01 +08:00
|
|
|
/// Construct a diagnostic builder of the given kind. Depending on \p K the
/// diagnostic is dropped (K_Nop), started immediately (K_Immediate /
/// K_ImmediateWithCallStack), or captured as a PartialDiagnostic for later
/// emission (K_Deferred). Actual emission happens in the destructor.
Sema::CUDADiagBuilder::CUDADiagBuilder(Kind K, SourceLocation Loc,
                                       unsigned DiagID, FunctionDecl *Fn,
                                       Sema &S)
    : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
      // Call-stack notes accompany deferred diags and the
      // immediate-with-callstack flavor only.
      ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
  switch (K) {
  case K_Nop:
    // Neither diagnostic member is set; everything streamed in is discarded.
    break;
  case K_Immediate:
  case K_ImmediateWithCallStack:
    ImmediateDiag.emplace(S.Diag(Loc, DiagID));
    break;
  case K_Deferred:
    assert(Fn && "Must have a function to attach the deferred diag to.");
    PartialDiag.emplace(S.PDiag(DiagID));
    break;
  }
}
|
|
|
|
|
|
|
|
// Print notes showing how we can reach FD starting from an a priori
|
|
|
|
// known-callable function.
|
|
|
|
static void EmitCallStackNotes(Sema &S, FunctionDecl *FD) {
|
|
|
|
auto FnIt = S.CUDAKnownEmittedFns.find(FD);
|
|
|
|
while (FnIt != S.CUDAKnownEmittedFns.end()) {
|
|
|
|
DiagnosticBuilder Builder(
|
|
|
|
S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
|
|
|
|
Builder << FnIt->second.FD;
|
|
|
|
Builder.setForceEmit();
|
|
|
|
|
|
|
|
FnIt = S.CUDAKnownEmittedFns.find(FnIt->second.FD);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Emission point for the builder: immediate diagnostics are flushed here
/// (plus call-stack notes when applicable), deferred ones are stashed in
/// Sema::CUDADeferredDiags keyed by Fn.
Sema::CUDADiagBuilder::~CUDADiagBuilder() {
  if (ImmediateDiag) {
    // Emit our diagnostic and, if it was a warning or error, output a callstack
    // if Fn isn't a priori known-emitted.
    bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
                                DiagID, Loc) >= DiagnosticsEngine::Warning;
    ImmediateDiag.reset(); // Emit the immediate diag.
    if (IsWarningOrError && ShowCallStack)
      EmitCallStackNotes(S, Fn);
  } else if (PartialDiag) {
    assert(ShowCallStack && "Must always show call stack for deferred diags.");
    S.CUDADeferredDiags[Fn].push_back({Loc, std::move(*PartialDiag)});
  }
}
|
|
|
|
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
// Do we know that we will eventually codegen the given function?
static bool IsKnownEmitted(Sema &S, FunctionDecl *FD) {
  // Templates are emitted when they're instantiated.
  if (FD->isDependentContext())
    return false;

  // When compiling for device, host functions are never emitted.  Similarly,
  // when compiling for host, device and global functions are never emitted.
  // (Technically, we do emit a host-side stub for global functions, but this
  // doesn't count for our purposes here.)
  Sema::CUDAFunctionTarget T = S.IdentifyCUDATarget(FD);
  if (S.getLangOpts().CUDAIsDevice && T == Sema::CFT_Host)
    return false;
  if (!S.getLangOpts().CUDAIsDevice &&
      (T == Sema::CFT_Device || T == Sema::CFT_Global))
    return false;

  // Check whether this function is externally visible -- if so, it's
  // known-emitted.
  //
  // We have to check the GVA linkage of the function's *definition* -- if we
  // only have a declaration, we don't know whether or not the function will be
  // emitted, because (say) the definition could include "inline".
  FunctionDecl *Def = FD->getDefinition();

  if (Def &&
      !isDiscardableGVALinkage(S.getASTContext().GetGVALinkageForFunction(Def)))
    return true;

  // Otherwise, the function is known-emitted if it's in our set of
  // known-emitted functions.
  return S.CUDAKnownEmittedFns.count(FD) > 0;
}
|
|
|
|
|
2016-10-14 02:45:08 +08:00
|
|
|
/// Build a diagnostic that should only fire when the current context is
/// compiled as CUDA *device* code.
///
/// \param Loc source location the diagnostic points at.
/// \param DiagID the diagnostic to (possibly) emit.
/// \return a CUDADiagBuilder whose kind says whether the diagnostic is emitted
///   immediately, deferred until the enclosing function is known-emitted, or
///   suppressed entirely.
Sema::CUDADiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
                                                 unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  CUDADiagBuilder::Kind DiagKind;
  switch (CurrentCUDATarget()) {
  case CFT_Global:
  case CFT_Device:
    // Unambiguously device code: complain right away.
    DiagKind = CUDADiagBuilder::K_Immediate;
    break;
  case CFT_HostDevice:
    // An HD function counts as host code if we're compiling for host, and
    // device code if we're compiling for device.  Defer any errors in device
    // mode until the function is known-emitted.
    if (!getLangOpts().CUDAIsDevice)
      DiagKind = CUDADiagBuilder::K_Nop;
    else if (IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext)))
      DiagKind = CUDADiagBuilder::K_ImmediateWithCallStack;
    else
      DiagKind = CUDADiagBuilder::K_Deferred;
    break;
  default:
    DiagKind = CUDADiagBuilder::K_Nop;
    break;
  }
  return CUDADiagBuilder(DiagKind, Loc, DiagID,
                         dyn_cast<FunctionDecl>(CurContext), *this);
}
|
2016-09-29 06:45:54 +08:00
|
|
|
|
2016-10-14 02:45:08 +08:00
|
|
|
/// Build a diagnostic that should only fire when the current context is
/// compiled as CUDA *host* code.
///
/// \param Loc source location the diagnostic points at.
/// \param DiagID the diagnostic to (possibly) emit.
/// \return a CUDADiagBuilder whose kind says whether the diagnostic is emitted
///   immediately, deferred until the enclosing function is known-emitted, or
///   suppressed entirely.
Sema::CUDADiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
                                               unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  CUDADiagBuilder::Kind DiagKind;
  switch (CurrentCUDATarget()) {
  case CFT_Host:
    DiagKind = CUDADiagBuilder::K_Immediate;
    break;
  case CFT_HostDevice:
    // An HD function counts as host code if we're compiling for host, and
    // device code if we're compiling for device.  Defer any errors in device
    // mode until the function is known-emitted.
    if (getLangOpts().CUDAIsDevice)
      DiagKind = CUDADiagBuilder::K_Nop;
    else
      DiagKind = IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
                     ? CUDADiagBuilder::K_ImmediateWithCallStack
                     : CUDADiagBuilder::K_Deferred;
    break;
  default:
    DiagKind = CUDADiagBuilder::K_Nop;
    break;
  }
  return CUDADiagBuilder(DiagKind, Loc, DiagID,
                         dyn_cast<FunctionDecl>(CurContext), *this);
}
|
2016-09-29 06:45:58 +08:00
|
|
|
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
// Emit any deferred diagnostics for FD and erase them from the map in which
|
|
|
|
// they're stored.
|
|
|
|
static void EmitDeferredDiags(Sema &S, FunctionDecl *FD) {
|
|
|
|
auto It = S.CUDADeferredDiags.find(FD);
|
|
|
|
if (It == S.CUDADeferredDiags.end())
|
|
|
|
return;
|
2016-10-20 05:15:01 +08:00
|
|
|
bool HasWarningOrError = false;
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
for (PartialDiagnosticAt &PDAt : It->second) {
|
|
|
|
const SourceLocation &Loc = PDAt.first;
|
|
|
|
const PartialDiagnostic &PD = PDAt.second;
|
2016-10-20 05:15:01 +08:00
|
|
|
HasWarningOrError |= S.getDiagnostics().getDiagnosticLevel(
|
|
|
|
PD.getDiagID(), Loc) >= DiagnosticsEngine::Warning;
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
|
|
|
|
Builder.setForceEmit();
|
|
|
|
PD.Emit(Builder);
|
|
|
|
}
|
|
|
|
S.CUDADeferredDiags.erase(It);
|
2016-10-20 05:15:01 +08:00
|
|
|
|
|
|
|
// FIXME: Should this be called after every warning/error emitted in the loop
|
|
|
|
// above, instead of just once per function? That would be consistent with
|
|
|
|
// how we handle immediate errors, but it also seems like a bit much.
|
|
|
|
if (HasWarningOrError)
|
|
|
|
EmitCallStackNotes(S, FD);
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Indicate that this function (and thus everything it transitively calls) will
// be codegen'ed, and emit any deferred diagnostics on this function and its
// (transitive) callees.
static void MarkKnownEmitted(Sema &S, FunctionDecl *OrigCaller,
                             FunctionDecl *OrigCallee, SourceLocation OrigLoc) {
  // Nothing to do if we already know that FD is emitted.
  if (IsKnownEmitted(S, OrigCallee)) {
    // Once a function is known-emitted its callee list has been drained from
    // CUDACallGraph (see the erase at the bottom of the loop below).
    assert(!S.CUDACallGraph.count(OrigCallee));
    return;
  }

  // We've just discovered that OrigCallee is known-emitted.  Walk our call
  // graph to see what else we can now discover also must be emitted.

  // One edge of the partial call graph: Caller calls Callee at Loc.
  struct CallInfo {
    FunctionDecl *Caller;
    FunctionDecl *Callee;
    SourceLocation Loc;
  };
  llvm::SmallVector<CallInfo, 4> Worklist = {{OrigCaller, OrigCallee, OrigLoc}};
  // Guards against revisiting a callee pushed onto the worklist twice.
  llvm::SmallSet<CanonicalDeclPtr<FunctionDecl>, 4> Seen;
  Seen.insert(OrigCallee);
  while (!Worklist.empty()) {
    CallInfo C = Worklist.pop_back_val();
    assert(!IsKnownEmitted(S, C.Callee) &&
           "Worklist should not contain known-emitted functions.");
    // Record who first forced this callee to be emitted and from where, then
    // flush any diagnostics that were deferred until this moment.
    S.CUDAKnownEmittedFns[C.Callee] = {C.Caller, C.Loc};
    EmitDeferredDiags(S, C.Callee);

    // If this is a template instantiation, explore its callgraph as well:
    // Non-dependent calls are part of the template's callgraph, while dependent
    // calls are part of the instantiation's call graph.
    if (auto *Templ = C.Callee->getPrimaryTemplate()) {
      FunctionDecl *TemplFD = Templ->getAsFunction();
      if (!Seen.count(TemplFD) && !S.CUDAKnownEmittedFns.count(TemplFD)) {
        Seen.insert(TemplFD);
        Worklist.push_back(
            {/* Caller = */ C.Caller, /* Callee = */ TemplFD, C.Loc});
      }
    }

    // Add all functions called by Callee to our worklist.
    auto CGIt = S.CUDACallGraph.find(C.Callee);
    if (CGIt == S.CUDACallGraph.end())
      continue;

    for (std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation> FDLoc :
         CGIt->second) {
      FunctionDecl *NewCallee = FDLoc.first;
      SourceLocation CallLoc = FDLoc.second;
      // Skip callees already queued or already known-emitted.
      if (Seen.count(NewCallee) || IsKnownEmitted(S, NewCallee))
        continue;
      Seen.insert(NewCallee);
      Worklist.push_back(
          {/* Caller = */ C.Callee, /* Callee = */ NewCallee, CallLoc});
    }

    // C.Callee is now known-emitted, so we no longer need to maintain its list
    // of callees in CUDACallGraph.
    S.CUDACallGraph.erase(CGIt);
  }
}
|
|
|
|
|
2016-10-14 02:45:08 +08:00
|
|
|
// Check whether a call from the current context to Callee is permitted under
// CUDA's host/device target rules.  If the call is illegal, emit an error
// either immediately (when we already know the caller will be emitted) or
// defer it until we learn the caller is emitted.
//
// Returns true if the call is OK or only provoked a deferred diagnostic;
// returns false if an immediate error was emitted, so the caller of this
// function can treat the call expression as invalid.
bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  assert(Callee && "Callee may not be null.");
  // FIXME: Is bailing out early correct here? Should we instead assume that
  // the caller is a global initializer?
  FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
  if (!Caller)
    return true;

  // If the caller is known-emitted, mark the callee as known-emitted.
  // Otherwise, mark the call in our call graph so we can traverse it later.
  bool CallerKnownEmitted = IsKnownEmitted(*this, Caller);
  if (CallerKnownEmitted)
    MarkKnownEmitted(*this, Caller, Callee, Loc);
  else {
    // If we have
    //   host fn calls kernel fn calls host+device,
    // the HD function does not get instantiated on the host.  We model this by
    // omitting the call to the kernel from the callgraph.  This ensures
    // that, when compiling for host, only HD functions actually called from the
    // host get marked as known-emitted.
    if (getLangOpts().CUDAIsDevice || IdentifyCUDATarget(Callee) != CFT_Global)
      CUDACallGraph[Caller].insert({Callee, Loc});
  }

  // Classify the call: K_Immediate for calls that are never legal,
  // K_ImmediateWithCallStack/K_Deferred for wrong-side calls (depending on
  // whether the caller is already known to be emitted), K_Nop for legal calls.
  CUDADiagBuilder::Kind DiagKind = [&] {
    switch (IdentifyCUDAPreference(Caller, Callee)) {
    case CFP_Never:
      return CUDADiagBuilder::K_Immediate;
    case CFP_WrongSide:
      assert(Caller && "WrongSide calls require a non-null caller");
      // If we know the caller will be emitted, we know this wrong-side call
      // will be emitted, so it's an immediate error.  Otherwise, defer the
      // error until we know the caller is emitted.
      return CallerKnownEmitted ? CUDADiagBuilder::K_ImmediateWithCallStack
                                : CUDADiagBuilder::K_Deferred;
    default:
      return CUDADiagBuilder::K_Nop;
    }
  }();

  if (DiagKind == CUDADiagBuilder::K_Nop)
    return true;

  // Avoid emitting this error twice for the same location.  Using a hashtable
  // like this is unfortunate, but because we must continue parsing as normal
  // after encountering a deferred error, it's otherwise very tricky for us to
  // ensure that we only emit this deferred error once.
  if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
    return true;

  // Emit (or record, for deferred kinds) the bad-target error plus a note
  // pointing at the callee's declaration.
  CUDADiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
      << IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
  CUDADiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
                  Caller, *this)
      << Callee;
  // Only the two immediate kinds make the call expression ill-formed right
  // now; deferred diagnostics let parsing continue as if the call were fine.
  return DiagKind != CUDADiagBuilder::K_Immediate &&
         DiagKind != CUDADiagBuilder::K_ImmediateWithCallStack;
}
|
2016-10-01 01:14:53 +08:00
|
|
|
|
|
|
|
// Give a lambda's call operator implicit CUDA target attributes derived from
// the function it is written in: lambdas inside __device__ or __global__
// functions become __device__, and lambdas inside __host__ __device__
// functions become __host__ __device__.  Explicit attributes win.
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  // An explicitly-annotated lambda keeps its annotations.
  if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
    return;
  FunctionDecl *EnclosingFn = dyn_cast<FunctionDecl>(CurContext);
  if (!EnclosingFn)
    return;
  switch (IdentifyCUDATarget(EnclosingFn)) {
  case CFT_Global:
  case CFT_Device:
    Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    break;
  case CFT_HostDevice:
    Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
    break;
  default:
    // Host (or invalid) context: leave the lambda host-only, as usual.
    break;
  }
}
|
2016-12-08 03:27:16 +08:00
|
|
|
|
|
|
|
void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
|
2016-12-09 03:38:13 +08:00
|
|
|
const LookupResult &Previous) {
|
2016-12-08 03:27:16 +08:00
|
|
|
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
|
|
|
|
CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD);
|
|
|
|
for (NamedDecl *OldND : Previous) {
|
|
|
|
FunctionDecl *OldFD = OldND->getAsFunction();
|
|
|
|
if (!OldFD)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD);
|
|
|
|
// Don't allow HD and global functions to overload other functions with the
|
|
|
|
// same signature. We allow overloading based on CUDA attributes so that
|
|
|
|
// functions can have different implementations on the host and device, but
|
|
|
|
// HD/global functions "exist" in some sense on both the host and device, so
|
|
|
|
// should have the same implementation on both sides.
|
|
|
|
if (NewTarget != OldTarget &&
|
|
|
|
((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
|
|
|
|
(NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
|
|
|
|
!IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
|
|
|
|
/* ConsiderCudaAttrs = */ false)) {
|
|
|
|
Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
|
|
|
|
<< NewTarget << NewFD->getDeclName() << OldTarget << OldFD;
|
|
|
|
Diag(OldFD->getLocation(), diag::note_previous_declaration);
|
|
|
|
NewFD->setInvalidDecl();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-12-09 03:38:13 +08:00
|
|
|
|
|
|
|
template <typename AttrTy>
|
|
|
|
static void copyAttrIfPresent(Sema &S, FunctionDecl *FD,
|
|
|
|
const FunctionDecl &TemplateFD) {
|
|
|
|
if (AttrTy *Attribute = TemplateFD.getAttr<AttrTy>()) {
|
|
|
|
AttrTy *Clone = Attribute->clone(S.Context);
|
|
|
|
Clone->setInherited(true);
|
|
|
|
FD->addAttr(Clone);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy any CUDA target attributes (__global__, __host__, __device__) from the
// template pattern of TD onto FD, so an instantiation/specialization inherits
// the pattern's CUDA target.
void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
                                  const FunctionTemplateDecl &TD) {
  const FunctionDecl &Pattern = *TD.getTemplatedDecl();
  copyAttrIfPresent<CUDAGlobalAttr>(*this, FD, Pattern);
  copyAttrIfPresent<CUDAHostAttr>(*this, FD, Pattern);
  copyAttrIfPresent<CUDADeviceAttr>(*this, FD, Pattern);
}
|