2014-09-03 23:27:03 +08:00
|
|
|
//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
/// \file
|
|
|
|
/// \brief This file implements semantic analysis for CUDA constructs.
|
|
|
|
///
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "clang/AST/ASTContext.h"
|
|
|
|
#include "clang/AST/Decl.h"
|
2016-02-03 06:29:48 +08:00
|
|
|
#include "clang/AST/ExprCXX.h"
|
2014-12-04 05:53:36 +08:00
|
|
|
#include "clang/Lex/Preprocessor.h"
|
2016-03-31 07:30:21 +08:00
|
|
|
#include "clang/Sema/Lookup.h"
|
|
|
|
#include "clang/Sema/Sema.h"
|
2014-09-03 23:27:03 +08:00
|
|
|
#include "clang/Sema/SemaDiagnostic.h"
|
2016-10-14 02:45:08 +08:00
|
|
|
#include "clang/Sema/SemaInternal.h"
|
2016-03-31 07:30:21 +08:00
|
|
|
#include "clang/Sema/Template.h"
|
2014-09-30 04:38:29 +08:00
|
|
|
#include "llvm/ADT/Optional.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2014-09-03 23:27:03 +08:00
|
|
|
using namespace clang;
|
|
|
|
|
2016-10-09 06:15:58 +08:00
|
|
|
/// Enter a region (opened by #pragma clang force_cuda_host_device_begin)
/// in which every function declaration is implicitly __host__ __device__.
void Sema::PushForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  ++ForceCUDAHostDeviceDepth;
}
|
|
|
|
|
|
|
|
/// Leave the innermost force-host-device region.
/// \return false if no region was open (unbalanced pragma), true otherwise.
bool Sema::PopForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (ForceCUDAHostDeviceDepth > 0) {
    --ForceCUDAHostDeviceDepth;
    return true;
  }
  return false;
}
|
|
|
|
|
2014-09-03 23:27:03 +08:00
|
|
|
/// Build the call that represents a kernel-launch execution configuration,
/// i.e. the `<<<...>>>` part of `kernel<<<grid, block>>>(args)`.
/// \param LLLLoc location of the `<<<` token, \param GGGLoc of the `>>>`.
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  // cudaConfigureCall is declared by the CUDA headers; without it we cannot
  // lower the launch configuration to a call.
  FunctionDecl *Config = Context.getcudaConfigureCallDecl();
  if (!Config)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << "cudaConfigureCall");

  // Reference the configuration function and emit the (exec-config flavored)
  // call carrying the user's grid/block/shmem/stream arguments.
  QualType ConfigTy = Config->getType();
  DeclRefExpr *ConfigRef =
      new (Context) DeclRefExpr(Config, false, ConfigTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, Config);

  return ActOnCallExpr(S, ConfigRef, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}
|
|
|
|
|
|
|
|
/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
|
|
|
|
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
  // Code that lives outside a function is run on the host.
  if (!D)
    return CFT_Host;

  // A previous target-inference failure marks the decl invalid-target.
  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  // __device__ alone -> device; combined with __host__ -> host-device.
  if (D->hasAttr<CUDADeviceAttr>())
    return D->hasAttr<CUDAHostAttr>() ? CFT_HostDevice : CFT_Device;

  if (D->hasAttr<CUDAHostAttr>())
    return CFT_Host;

  // Some implicit declarations (like intrinsic functions) are not marked.
  // Set the most lenient target on them for maximal flexibility.
  if (D->isImplicit())
    return CFT_HostDevice;

  // Unattributed, explicitly written functions default to host.
  return CFT_Host;
}
|
|
|
|
|
2015-09-23 01:22:59 +08:00
|
|
|
// * CUDA Call preference table
|
|
|
|
//
|
|
|
|
// F - from,
|
|
|
|
// T - to
|
|
|
|
// Ph - preference in host mode
|
|
|
|
// Pd - preference in device mode
|
|
|
|
// H - handled in (x)
|
2016-03-30 00:24:22 +08:00
|
|
|
// Preferences: N:native, SS:same side, HD:host-device, WS:wrong side, --:never.
|
2015-09-23 01:22:59 +08:00
|
|
|
//
|
2016-02-13 02:29:18 +08:00
|
|
|
// | F | T | Ph | Pd | H |
|
|
|
|
// |----+----+-----+-----+-----+
|
|
|
|
// | d | d | N | N | (c) |
|
|
|
|
// | d | g | -- | -- | (a) |
|
|
|
|
// | d | h | -- | -- | (e) |
|
|
|
|
// | d | hd | HD | HD | (b) |
|
|
|
|
// | g | d | N | N | (c) |
|
|
|
|
// | g | g | -- | -- | (a) |
|
|
|
|
// | g | h | -- | -- | (e) |
|
|
|
|
// | g | hd | HD | HD | (b) |
|
|
|
|
// | h | d | -- | -- | (e) |
|
|
|
|
// | h | g | N | N | (c) |
|
|
|
|
// | h | h | N | N | (c) |
|
|
|
|
// | h | hd | HD | HD | (b) |
|
|
|
|
// | hd | d | WS | SS | (d) |
|
|
|
|
// | hd | g | SS | -- |(d/a)|
|
|
|
|
// | hd | h | SS | WS | (d) |
|
|
|
|
// | hd | hd | HD | HD | (b) |
|
2015-09-23 01:22:59 +08:00
|
|
|
|
|
|
|
/// Rank how desirable a call from \p Caller to \p Callee is, per the
/// preference table above. \p Caller may be null (host context).
Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
                             const FunctionDecl *Callee) {
  assert(Callee && "Callee must be valid.");
  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
  CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);

  // An invalid target on either side poisons the call entirely.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return CFP_Never;

  // (a) Kernels cannot be launched from device-side code until we support
  // CUDA dynamic parallelism.
  bool CallerIsDeviceSide =
      CallerTarget == CFT_Global || CallerTarget == CFT_Device;
  if (CalleeTarget == CFT_Global && CallerIsDeviceSide)
    return CFP_Never;

  // (b) A __host__ __device__ callee is callable from anywhere.
  if (CalleeTarget == CFT_HostDevice)
    return CFP_HostDevice;

  // (c) Best-case matches: same target, host launching a kernel, or a
  // kernel calling device code.
  bool HostLaunchesKernel =
      CallerTarget == CFT_Host && CalleeTarget == CFT_Global;
  bool KernelCallsDevice =
      CallerTarget == CFT_Global && CalleeTarget == CFT_Device;
  if (CalleeTarget == CallerTarget || HostLaunchesKernel || KernelCallsDevice)
    return CFP_Native;

  // (d) A __host__ __device__ caller's preference depends on which side we
  // are currently compiling for.
  if (CallerTarget == CFT_HostDevice) {
    bool CalleeMatchesMode;
    if (getLangOpts().CUDAIsDevice)
      CalleeMatchesMode = CalleeTarget == CFT_Device;
    else
      CalleeMatchesMode =
          CalleeTarget == CFT_Host || CalleeTarget == CFT_Global;
    if (CalleeMatchesMode)
      return CFP_SameSide;

    // Calls from HD to non-mode-matching functions (host callees in device
    // mode, device callees in host mode) are allowed at the sema level, but
    // eventually rejected if they're ever codegened.
    // TODO: Reject said calls earlier.
    return CFP_WrongSide;
  }

  // (e) Calling across the device/host boundary is never allowed.
  if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
      (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
    return CFP_Never;

  llvm_unreachable("All cases should've been handled by now.");
}
|
|
|
|
|
2016-10-11 08:21:10 +08:00
|
|
|
void Sema::EraseUnwantedCUDAMatches(
|
|
|
|
const FunctionDecl *Caller,
|
|
|
|
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
|
2015-09-23 01:22:59 +08:00
|
|
|
if (Matches.size() <= 1)
|
|
|
|
return;
|
|
|
|
|
2016-10-11 08:21:10 +08:00
|
|
|
using Pair = std::pair<DeclAccessPair, FunctionDecl*>;
|
|
|
|
|
2016-03-22 08:09:25 +08:00
|
|
|
// Gets the CUDA function preference for a call from Caller to Match.
|
2016-10-11 08:21:10 +08:00
|
|
|
auto GetCFP = [&](const Pair &Match) {
|
|
|
|
return IdentifyCUDAPreference(Caller, Match.second);
|
2016-03-22 08:09:25 +08:00
|
|
|
};
|
|
|
|
|
2015-09-23 01:22:59 +08:00
|
|
|
// Find the best call preference among the functions in Matches.
|
2016-10-11 08:21:10 +08:00
|
|
|
CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
|
2016-03-22 08:09:25 +08:00
|
|
|
Matches.begin(), Matches.end(),
|
2016-10-11 08:21:10 +08:00
|
|
|
[&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));
|
2015-09-23 01:22:59 +08:00
|
|
|
|
|
|
|
// Erase all functions with lower priority.
|
2016-07-13 07:23:13 +08:00
|
|
|
Matches.erase(
|
2016-10-11 08:21:10 +08:00
|
|
|
llvm::remove_if(
|
|
|
|
Matches, [&](const Pair &Match) { return GetCFP(Match) < BestCFP; }),
|
2016-07-13 07:23:13 +08:00
|
|
|
Matches.end());
|
2015-09-23 01:22:59 +08:00
|
|
|
}
|
|
|
|
|
2014-09-30 04:38:29 +08:00
|
|
|
/// When an implicitly-declared special member has to invoke more than one
|
|
|
|
/// base/field special member, conflicts may occur in the targets of these
|
|
|
|
/// members. For example, if one base's member __host__ and another's is
|
|
|
|
/// __device__, it's a conflict.
|
|
|
|
/// This function figures out if the given targets \param Target1 and
|
|
|
|
/// \param Target2 conflict, and if they do not it fills in
|
|
|
|
/// \param ResolvedTarget with a target that resolves for both calls.
|
|
|
|
/// \return true if there's a conflict, false otherwise.
|
|
|
|
static bool
|
|
|
|
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
|
|
|
|
Sema::CUDAFunctionTarget Target2,
|
|
|
|
Sema::CUDAFunctionTarget *ResolvedTarget) {
|
2016-01-20 08:26:57 +08:00
|
|
|
// Only free functions and static member functions may be global.
|
|
|
|
assert(Target1 != Sema::CFT_Global);
|
|
|
|
assert(Target2 != Sema::CFT_Global);
|
2014-09-30 04:38:29 +08:00
|
|
|
|
|
|
|
if (Target1 == Sema::CFT_HostDevice) {
|
|
|
|
*ResolvedTarget = Target2;
|
|
|
|
} else if (Target2 == Sema::CFT_HostDevice) {
|
|
|
|
*ResolvedTarget = Target1;
|
|
|
|
} else if (Target1 != Target2) {
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
*ResolvedTarget = Target1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Infer the CUDA target (__host__/__device__ attributes) of the
/// implicitly-declared special member \p MemberDecl of \p ClassDecl from the
/// targets of the base-class and field special members it must invoke.
/// On an unresolvable conflict the member is marked CUDAInvalidTargetAttr
/// (and a note is emitted when \p Diagnose is set).
/// \return true if a target conflict was found, false otherwise.
bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  // Unset until the first callee contributes a target.
  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member base on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  // Virtual bases are only constructed/destroyed by the most-derived class,
  // which an abstract class can never be.
  if (!ClassDecl->isAbstract()) {
    for (const auto &VB : ClassDecl->vbases()) {
      Bases.push_back(&VB);
    }
  }

  for (const auto *B : Bases) {
    // Dependent or non-record bases contribute nothing.
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    // Bases without the corresponding special member are skipped; other
    // parts of Sema diagnose that situation.
    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = BaseMethodTarget;
    } else {
      // Merge this base's target into the running inference; write the
      // merged target back through Optional's storage pointer.
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
        }
        // Poison the member so later target checks fail fast.
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    // Look through arrays to the element type; only record fields have
    // special members to consider.
    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue()
              << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Materialize the inferred target as implicit attributes on the member.
  if (InferredTarget.hasValue()) {
    if (InferredTarget.getValue() == CFT_Device) {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    } else if (InferredTarget.getValue() == CFT_Host) {
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    } else {
      // CFT_HostDevice (the only remaining possibility here).
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    }
  } else {
    // If no target was inferred, mark this member as __host__ __device__;
    // it's the least restrictive option that can be invoked from any target.
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
  }

  return false;
}
|
2016-02-03 06:29:48 +08:00
|
|
|
|
|
|
|
/// Is \p CD an "empty" constructor per CUDA 7.5 E.2.3.1 (and therefore
/// permitted on device-side global initializations)?
bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
  // Force instantiation so we can inspect a template's body.
  if (!CD->isDefined() && CD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, CD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A constructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial constructor...
  if (CD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // it has been defined, takes no parameters, and its body is an empty
  // compound statement.
  if (!CD->hasTrivialBody() || CD->getNumParams() != 0)
    return false;

  // Its class has no virtual functions and no virtual base classes.
  if (CD->getParent()->isDynamicClass())
    return false;

  // The only initializers allowed are themselves empty constructor calls;
  // this recurses through base classes and member initializers.
  auto InitIsEmptyCtor = [&](const CXXCtorInitializer *CI) {
    const auto *CE = dyn_cast<CXXConstructExpr>(CI->getInit());
    return CE && isEmptyCudaConstructor(Loc, CE->getConstructor());
  };
  return llvm::all_of(CD->inits(), InitIsEmptyCtor);
}
|
2016-03-31 07:30:21 +08:00
|
|
|
|
2016-05-20 04:13:53 +08:00
|
|
|
/// Is \p DD an "empty" destructor per CUDA 7.5 E.2.3.1? \p DD may be null
/// (no destructor at all), which trivially qualifies.
bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
  // No destructor -> no problem.
  if (!DD)
    return true;

  // Force instantiation so we can inspect a template's body.
  if (!DD->isDefined() && DD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, DD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A destructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial destructor...
  if (DD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // it has been defined and its body is an empty compound statement.
  if (!DD->hasTrivialBody())
    return false;

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Its class has no virtual functions and no virtual base classes.
  if (ClassDecl->isDynamicClass())
    return false;

  // Only empty destructors are allowed; recurse through all base classes...
  auto BaseDtorIsEmpty = [&](const CXXBaseSpecifier &BS) {
    CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl();
    return !RD || isEmptyCudaDestructor(Loc, RD->getDestructor());
  };
  if (!llvm::all_of(ClassDecl->bases(), BaseDtorIsEmpty))
    return false;

  // ... and through all member fields (looking through arrays).
  auto FieldDtorIsEmpty = [&](const FieldDecl *Field) {
    CXXRecordDecl *RD =
        Field->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
    return !RD || isEmptyCudaDestructor(Loc, RD->getDestructor());
  };
  return llvm::all_of(ClassDecl->fields(), FieldDtorIsEmpty);
}
|
|
|
|
|
2016-03-31 07:30:21 +08:00
|
|
|
// With -fcuda-host-device-constexpr, an unattributed constexpr function is
|
|
|
|
// treated as implicitly __host__ __device__, unless:
|
|
|
|
// * it is a variadic function (device-side variadic functions are not
|
|
|
|
// allowed), or
|
|
|
|
// * a __device__ function with this signature was already declared, in which
//   case we output an error, unless the __device__ decl is in a system
//   header, in which case we leave the constexpr function unattributed.
|
2016-10-09 06:15:58 +08:00
|
|
|
//
|
|
|
|
// In addition, all function decls are treated as __host__ __device__ when
|
|
|
|
// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
|
|
|
|
// #pragma clang force_cuda_host_device_begin/end
|
|
|
|
// pair).
|
2016-03-31 07:30:21 +08:00
|
|
|
/// Possibly add implicit CUDAHostAttr and CUDADeviceAttr to the new function
/// declaration \p NewD, per the rules described in the comment above
/// (force_cuda_host_device pragma regions and -fcuda-host-device-constexpr).
/// \param Previous prior declarations found by lookup for NewD's name.
void Sema::maybeAddCUDAHostDeviceAttrs(Scope *S, FunctionDecl *NewD,
                                       const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  // Inside a #pragma clang force_cuda_host_device region, every function
  // becomes __host__ __device__ unconditionally.
  if (ForceCUDAHostDeviceDepth > 0) {
    if (!NewD->hasAttr<CUDAHostAttr>())
      NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
    if (!NewD->hasAttr<CUDADeviceAttr>())
      NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    return;
  }

  // Only unattributed, non-variadic constexpr functions are eligible for the
  // implicit host+device treatment, and only when
  // -fcuda-host-device-constexpr is in effect.
  if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
      NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
      NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
    return;

  // Is D a __device__ function with the same signature as NewD, ignoring CUDA
  // attributes?
  auto IsMatchingDeviceFn = [&](NamedDecl *D) {
    // Look through using-declarations to the underlying function.
    if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(D))
      D = Using->getTargetDecl();
    FunctionDecl *OldD = D->getAsFunction();
    // "Same signature" == NOT a distinct overload once CUDA attrs are ignored.
    return OldD && OldD->hasAttr<CUDADeviceAttr>() &&
           !OldD->hasAttr<CUDAHostAttr>() &&
           !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false,
                       /* ConsiderCudaAttrs = */ false);
  };
  auto It = llvm::find_if(Previous, IsMatchingDeviceFn);
  if (It != Previous.end()) {
    // We found a __device__ function with the same name and signature as NewD
    // (ignoring CUDA attrs). This is an error unless that function is defined
    // in a system header, in which case we simply return without making NewD
    // host+device.
    NamedDecl *Match = *It;
    if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
      Diag(NewD->getLocation(),
           diag::err_cuda_unattributed_constexpr_cannot_overload_device)
          << NewD->getName();
      Diag(Match->getLocation(),
           diag::note_cuda_conflicting_device_function_declared_here);
    }
    return;
  }

  // No conflicting __device__ overload: make the constexpr function HD.
  NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
  NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
|
2016-08-16 07:00:49 +08:00
|
|
|
|
2016-10-14 02:45:08 +08:00
|
|
|
/// Construct a diagnostic builder of the requested kind: K_Nop emits
/// nothing, K_Immediate wires up a normal Sema diagnostic, and K_Deferred
/// records a partial diagnostic against \p Fn to be emitted if and when Fn
/// is known to be codegen'ed.
Sema::CUDADiagBuilder::CUDADiagBuilder(Kind K, SourceLocation Loc,
                                       unsigned DiagID, FunctionDecl *Fn,
                                       Sema &S) {
  if (K == K_Immediate) {
    ImmediateDiagBuilder.emplace(S.Diag(Loc, DiagID));
  } else if (K == K_Deferred) {
    assert(Fn && "Must have a function to attach the deferred diag to.");
    PartialDiagInfo.emplace(S, Loc, S.PDiag(DiagID), Fn);
  }
  // K_Nop: leave both optionals empty; this builder is a no-op.
}
|
2016-10-08 09:07:11 +08:00
|
|
|
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
// In CUDA, there are some constructs which may appear in semantically-valid
|
|
|
|
// code, but trigger errors if we ever generate code for the function in which
|
|
|
|
// they appear. Essentially every construct you're not allowed to use on the
|
|
|
|
// device falls into this category, because you are allowed to use these
|
|
|
|
// constructs in a __host__ __device__ function, but only if that function is
|
|
|
|
// never codegen'ed on the device.
|
|
|
|
//
|
|
|
|
// To handle semantic checking for these constructs, we keep track of the set of
|
|
|
|
// functions we know will be emitted, either because we could tell a priori that
|
|
|
|
// they would be emitted, or because they were transitively called by a
|
|
|
|
// known-emitted function.
|
|
|
|
//
|
|
|
|
// We also keep a partial call graph of which not-known-emitted functions call
|
|
|
|
// which other not-known-emitted functions.
|
|
|
|
//
|
|
|
|
// When we see something which is illegal if the current function is emitted
|
|
|
|
// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
|
|
|
|
// CheckCUDACall), we first check if the current function is known-emitted. If
|
|
|
|
// so, we immediately output the diagnostic.
|
|
|
|
//
|
|
|
|
// Otherwise, we "defer" the diagnostic. It sits in Sema::CUDADeferredDiags
|
|
|
|
// until we discover that the function is known-emitted, at which point we take
|
|
|
|
// it out of this map and emit the diagnostic.
|
|
|
|
|
|
|
|
// Do we know that we will eventually codegen the given function?
|
|
|
|
// Do we know that we will eventually codegen the given function?
static bool IsKnownEmitted(Sema &S, FunctionDecl *FD) {
  // Templates are emitted when they're instantiated.
  if (FD->isDependentContext())
    return false;

  // When compiling for device, host functions are never emitted. Similarly,
  // when compiling for host, device and global functions are never emitted.
  // (Technically, we do emit a host-side stub for global functions, but this
  // doesn't count for our purposes here.)
  Sema::CUDAFunctionTarget T = S.IdentifyCUDATarget(FD);
  if (S.getLangOpts().CUDAIsDevice) {
    if (T == Sema::CFT_Host)
      return false;
  } else {
    if (T == Sema::CFT_Device || T == Sema::CFT_Global)
      return false;
  }

  // Externally-visible (non-discardable-linkage) functions are always
  // emitted.
  if (!isDiscardableGVALinkage(S.getASTContext().GetGVALinkageForFunction(FD)))
    return true;

  // Otherwise, the function is known-emitted only if we've already recorded
  // it in the known-emitted set.
  return S.CUDAKnownEmittedFns.count(FD) > 0;
}
|
|
|
|
|
2016-10-14 02:45:08 +08:00
|
|
|
/// Build a diagnostic that fires only if the current context is (or may be)
/// device code: immediate in __global__/__device__ contexts, deferred in an
/// HD function not yet known-emitted (device compilation), no-op otherwise.
Sema::CUDADiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
                                                 unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  CUDADiagBuilder::Kind DiagKind = CUDADiagBuilder::K_Nop;
  switch (CurrentCUDATarget()) {
  case CFT_Global:
  case CFT_Device:
    // Definitely device code: diagnose right away.
    DiagKind = CUDADiagBuilder::K_Immediate;
    break;
  case CFT_HostDevice:
    // An HD function counts as host code if we're compiling for host, and
    // device code if we're compiling for device. Defer any errors in device
    // mode until the function is known-emitted.
    if (getLangOpts().CUDAIsDevice)
      DiagKind = IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
                     ? CUDADiagBuilder::K_Immediate
                     : CUDADiagBuilder::K_Deferred;
    break;
  default:
    // Host-only contexts: never device code, so nothing to diagnose.
    break;
  }
  return CUDADiagBuilder(DiagKind, Loc, DiagID,
                         dyn_cast<FunctionDecl>(CurContext), *this);
}
|
2016-09-29 06:45:54 +08:00
|
|
|
|
2016-10-14 02:45:08 +08:00
|
|
|
Sema::CUDADiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
                                               unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  FunctionDecl *CurFD = dyn_cast<FunctionDecl>(CurContext);

  // Pick how the diagnostic is handled based on the CUDA target of the code
  // we are currently parsing.
  CUDADiagBuilder::Kind DiagKind;
  switch (CurrentCUDATarget()) {
  case CFT_Host:
    // Plain host code: this is host-side source, so complain right away.
    DiagKind = CUDADiagBuilder::K_Immediate;
    break;
  case CFT_HostDevice:
    // An HD function counts as host code if we're compiling for host, and
    // device code if we're compiling for device.  Defer any errors in device
    // mode until the function is known-emitted.
    if (getLangOpts().CUDAIsDevice)
      DiagKind = CUDADiagBuilder::K_Nop;
    else
      DiagKind = IsKnownEmitted(*this, CurFD) ? CUDADiagBuilder::K_Immediate
                                              : CUDADiagBuilder::K_Deferred;
    break;
  default:
    DiagKind = CUDADiagBuilder::K_Nop;
    break;
  }
  return CUDADiagBuilder(DiagKind, Loc, DiagID, CurFD, *this);
}
|
2016-09-29 06:45:58 +08:00
|
|
|
|
[CUDA] Emit deferred diagnostics during Sema rather than during codegen.
Summary:
Emitting deferred diagnostics during codegen was a hack. It did work,
but usability was poor, both for us as compiler devs and for users. We
don't codegen if there are any sema errors, so for users this meant that
they wouldn't see deferred errors if there were any non-deferred errors.
For devs, this meant that we had to carefully split up our tests so that
when we tested deferred errors, we didn't emit any non-deferred errors.
This change moves checking for deferred errors into Sema. See the big
comment in SemaCUDA.cpp for an overview of the idea.
This checking adds overhead to compilation, because we have to maintain
a partial call graph. As a result, this change makes deferred errors a
CUDA-only concept (whereas before they were a general concept). If
anyone else wants to use this framework for something other than CUDA,
we can generalize at that time.
This patch makes the minimal set of test changes -- after this lands,
I'll go back through and do a cleanup of the tests that we no longer
have to split up.
Reviewers: rnk
Subscribers: cfe-commits, rsmith, tra
Differential Revision: https://reviews.llvm.org/D25541
llvm-svn: 284158
2016-10-14 04:52:12 +08:00
|
|
|
// Emit any deferred diagnostics for FD and erase them from the map in which
|
|
|
|
// they're stored.
|
|
|
|
static void EmitDeferredDiags(Sema &S, FunctionDecl *FD) {
|
|
|
|
auto It = S.CUDADeferredDiags.find(FD);
|
|
|
|
if (It == S.CUDADeferredDiags.end())
|
|
|
|
return;
|
|
|
|
for (PartialDiagnosticAt &PDAt : It->second) {
|
|
|
|
const SourceLocation &Loc = PDAt.first;
|
|
|
|
const PartialDiagnostic &PD = PDAt.second;
|
|
|
|
DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
|
|
|
|
Builder.setForceEmit();
|
|
|
|
PD.Emit(Builder);
|
|
|
|
}
|
|
|
|
S.CUDADeferredDiags.erase(It);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Indicate that this function (and thus everything it transitively calls) will
// be codegen'ed, and emit any deferred diagnostics on this function and its
// (transitive) callees.
static void MarkKnownEmitted(Sema &S, FunctionDecl *FD) {
  // Nothing to do if we already know that FD is emitted.
  if (IsKnownEmitted(S, FD)) {
    // Known-emitted functions have had their callee lists erased from
    // CUDACallGraph (see the erase at the bottom of the loop below), so FD
    // must not appear there.
    assert(!S.CUDACallGraph.count(FD));
    return;
  }

  // We've just discovered that FD is known-emitted.  Walk our call graph to see
  // what else we can now discover also must be emitted.
  llvm::SmallVector<FunctionDecl *, 4> Worklist = {FD};
  llvm::SmallSet<FunctionDecl *, 4> Seen;
  Seen.insert(FD);
  while (!Worklist.empty()) {
    FunctionDecl *Caller = Worklist.pop_back_val();
    assert(!IsKnownEmitted(S, Caller) &&
           "Worklist should not contain known-emitted functions.");
    S.CUDAKnownEmittedFns.insert(Caller);
    EmitDeferredDiags(S, Caller);

    // Deferred diags are often emitted on the template itself, so emit those as
    // well.
    if (auto *Templ = Caller->getPrimaryTemplate())
      EmitDeferredDiags(S, Templ->getAsFunction());

    // Add all functions called by Caller to our worklist.
    auto CGIt = S.CUDACallGraph.find(Caller);
    if (CGIt == S.CUDACallGraph.end())
      continue;

    for (FunctionDecl *Callee : CGIt->second) {
      // Skip callees we've already queued or that are already known-emitted;
      // Seen guards against revisiting nodes in a cyclic call graph.
      if (Seen.count(Callee) || IsKnownEmitted(S, Callee))
        continue;
      Seen.insert(Callee);
      Worklist.push_back(Callee);
    }

    // Caller is now known-emitted, so we no longer need to maintain its list of
    // callees in CUDACallGraph.
    S.CUDACallGraph.erase(CGIt);
  }
}
|
|
|
|
|
2016-10-14 02:45:08 +08:00
|
|
|
// Check that the call from the current context to Callee is allowed under the
// CUDA target rules, recording the call edge for deferred-diagnostic tracking.
// Returns false when an immediate "bad target" error was emitted, true
// otherwise (including when the error was merely deferred or already
// reported at this location).
bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  assert(Callee && "Callee may not be null.");
  // FIXME: Is bailing out early correct here?  Should we instead assume that
  // the caller is a global initializer?
  FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
  if (!Caller)
    return true;

  // If the caller is known-emitted, everything it calls becomes known-emitted
  // too; otherwise remember the edge so the callee can be marked later.
  bool CallerKnownEmitted = IsKnownEmitted(*this, Caller);
  if (CallerKnownEmitted)
    MarkKnownEmitted(*this, Callee);
  else
    CUDACallGraph[Caller].insert(Callee);

  CUDADiagBuilder::Kind DiagKind = [&] {
    switch (IdentifyCUDAPreference(Caller, Callee)) {
    case CFP_Never:
      return CUDADiagBuilder::K_Immediate;
    case CFP_WrongSide:
      assert(Caller && "WrongSide calls require a non-null caller");
      // If we know the caller will be emitted, we know this wrong-side call
      // will be emitted, so it's an immediate error.  Otherwise, defer the
      // error until we know the caller is emitted.
      return CallerKnownEmitted ? CUDADiagBuilder::K_Immediate
                                : CUDADiagBuilder::K_Deferred;
    default:
      return CUDADiagBuilder::K_Nop;
    }
  }();

  // Avoid emitting this error twice for the same location.  Using a hashtable
  // like this is unfortunate, but because we must continue parsing as normal
  // after encountering a deferred error, it's otherwise very tricky for us to
  // ensure that we only emit this deferred error once.
  if (!LocsWithCUDACallDiags.insert(Loc.getRawEncoding()).second)
    return true;

  bool IsImmediateErr =
      CUDADiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
      << IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
  // Attach a note pointing at the callee's declaration alongside the error.
  CUDADiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
                  Caller, *this)
      << Callee;
  return !IsImmediateErr;
}
|
2016-10-01 01:14:53 +08:00
|
|
|
|
|
|
|
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  // Respect explicit annotations on the lambda; only infer attributes when
  // neither __host__ nor __device__ is already present.
  if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
    return;
  FunctionDecl *Enclosing = dyn_cast<FunctionDecl>(CurContext);
  if (!Enclosing)
    return;
  // The lambda inherits its CUDA target from the enclosing function.
  switch (IdentifyCUDATarget(Enclosing)) {
  case CFT_Global:
  case CFT_Device:
    Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    break;
  case CFT_HostDevice:
    Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
    break;
  default:
    break;
  }
}
|