llvm-project/clang/lib/Sema/Sema.cpp

//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the actions class which performs semantic analysis and
// builds an AST out of a parse stream.
//
//===----------------------------------------------------------------------===//
#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
using namespace sema;
SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}
ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
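/// Invent a name for the template type parameter that is synthesized for an
/// 'auto' parameter of an abbreviated function template: "auto:<N>" (with a
/// 1-based index) when the function parameter is unnamed, or "<name>:auto"
/// when it is named.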
IdentifierInfo *
Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned int Index) {
std::string InventedName;
llvm::raw_string_ostream OS(InventedName);
if (!ParamName)
OS << "auto:" << Index + 1;
else
OS << ParamName->getName() << ":auto";
OS.flush();
return &Context.Idents.get(OS.str());
}
PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
const Preprocessor &PP) {
PrintingPolicy Policy = Context.getPrintingPolicy();
// In diagnostics, we print _Bool as bool if the latter is defined as the
// former.
Policy.Bool = Context.getLangOpts().Bool;
if (!Policy.Bool) {
if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
Policy.Bool = BoolMacro->isObjectLike() &&
BoolMacro->getNumTokens() == 1 &&
BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
}
}
return Policy;
}
void Sema::ActOnTranslationUnitScope(Scope *S) {
TUScope = S;
PushDeclContext(S, Context.getTranslationUnitDecl());
}
namespace clang {
namespace sema {
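/// Preprocessor callbacks installed by Sema (and owned by the Preprocessor).
/// Tracks a stack of include locations so that entering and leaving an
/// included file can open/close a "Source" time-trace scope and diagnose
/// non-default #pragma pack/align state at include boundaries.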
class SemaPPCallbacks : public PPCallbacks {
Sema *S = nullptr;
llvm::SmallVector<SourceLocation, 8> IncludeStack;
public:
void set(Sema &S) { this->S = &S; }
void reset() { S = nullptr; }
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override {
if (!S)
return;
switch (Reason) {
case EnterFile: {
SourceManager &SM = S->getSourceManager();
SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
if (IncludeLoc.isValid()) {
if (llvm::timeTraceProfilerEnabled()) {
const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
llvm::timeTraceProfilerBegin(
"Source", FE != nullptr ? FE->getName() : StringRef("<unknown>"));
}
IncludeStack.push_back(IncludeLoc);
S->DiagnoseNonDefaultPragmaAlignPack(
Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
IncludeLoc);
}
break;
}
case ExitFile:
if (!IncludeStack.empty()) {
if (llvm::timeTraceProfilerEnabled())
llvm::timeTraceProfilerEnd();
S->DiagnoseNonDefaultPragmaAlignPack(
Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
IncludeStack.pop_back_val());
}
break;
default:
break;
}
}
};
} // end namespace sema
} // end namespace clang
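// Out-of-line definitions for Sema's in-class-initialized static const
// members; these are needed when the members are odr-used (required prior to
// C++17).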
const unsigned Sema::MaxAlignmentExponent;
const unsigned Sema::MaximumAlignment;
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
: ExternalSource(nullptr), isMultiplexExternalSource(false),
CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
SourceMgr(PP.getSourceManager()), CollectStats(false),
CodeCompleter(CodeCompleter), CurContext(nullptr),
OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
MSPointerToMemberRepresentationMethod(
LangOpts.getMSPointerToMemberRepresentationMethod()),
VtorDispStack(LangOpts.getVtorDispMode()),
AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()),
CurInitSeg(nullptr), VisContext(nullptr),
PragmaAttributeCurrentTargetDecl(nullptr),
IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr),
NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
TUKind(TUKind), NumSFINAEErrors(0),
FullyCheckedComparisonCategories(
static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
SatisfactionCache(Context), AccessCheckingSFINAE(false),
InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
TUScope = nullptr;
isConstantEvaluatedOverride = false;
LoadedExternalKnownNamespaces = false;
for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
NSNumberLiteralMethods[I] = nullptr;
if (getLangOpts().ObjC)
NSAPIObj.reset(new NSAPI(Context));
if (getLangOpts().CPlusPlus)
FieldCollector.reset(new CXXFieldCollector());
// Tell diagnostics how to render things from the AST library.
Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
ExprEvalContexts.emplace_back(
ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
nullptr, ExpressionEvaluationContextRecord::EK_Other);
// Initialization of data sharing attributes stack for OpenMP
InitDataSharingAttributesStack();
std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
std::make_unique<sema::SemaPPCallbacks>();
SemaPPCallbackHandler = Callbacks.get();
PP.addPPCallbacks(std::move(Callbacks));
SemaPPCallbackHandler->set(*this);
}
// Anchor Sema's type info to this TU.
void Sema::anchor() {}
void Sema::addImplicitTypedef(StringRef Name, QualType T) {
DeclarationName DN = &Context.Idents.get(Name);
if (IdResolver.begin(DN) == IdResolver.end())
PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
}
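/// Per-translation-unit initialization: notify the ASTConsumer and any
/// external Sema source that this Sema is ready, then seed the translation
/// unit scope with predefined declarations (the 128-bit integer types,
/// Objective-C's 'SEL', 'id', 'Class' and 'Protocol', Microsoft predefined
/// C++ types, OpenCL types, target-specific builtin types, and
/// __builtin_va_list) wherever they are not already visible to name lookup.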
void Sema::Initialize() {
if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
SC->InitializeSema(*this);
// Tell the external Sema source about this Sema object.
if (ExternalSemaSource *ExternalSema
= dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
ExternalSema->InitializeSema(*this);
// This needs to happen after ExternalSemaSource::InitializeSema(this) or we
// will not be able to merge any duplicate __va_list_tag decls correctly.
VAListTagName = PP.getIdentifierInfo("__va_list_tag");
if (!TUScope)
return;
// Initialize predefined 128-bit integer types, if needed.
if (Context.getTargetInfo().hasInt128Type() ||
(Context.getAuxTargetInfo() &&
Context.getAuxTargetInfo()->hasInt128Type())) {
// If either of the 128-bit integer types is unavailable to name lookup,
// define them now.
DeclarationName Int128 = &Context.Idents.get("__int128_t");
if (IdResolver.begin(Int128) == IdResolver.end())
PushOnScopeChains(Context.getInt128Decl(), TUScope);
DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
if (IdResolver.begin(UInt128) == IdResolver.end())
PushOnScopeChains(Context.getUInt128Decl(), TUScope);
}
// Initialize predefined Objective-C types:
if (getLangOpts().ObjC) {
// If 'SEL' does not yet refer to any declarations, make it refer to the
// predefined 'SEL'.
DeclarationName SEL = &Context.Idents.get("SEL");
if (IdResolver.begin(SEL) == IdResolver.end())
PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
// If 'id' does not yet refer to any declarations, make it refer to the
// predefined 'id'.
DeclarationName Id = &Context.Idents.get("id");
if (IdResolver.begin(Id) == IdResolver.end())
PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
// Create the built-in typedef for 'Class'.
DeclarationName Class = &Context.Idents.get("Class");
if (IdResolver.begin(Class) == IdResolver.end())
PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
// Create the built-in forward declaration for 'Protocol'.
DeclarationName Protocol = &Context.Idents.get("Protocol");
if (IdResolver.begin(Protocol) == IdResolver.end())
PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
}
// Create the internal type for the *StringMakeConstantString builtins.
DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
if (IdResolver.begin(ConstantString) == IdResolver.end())
PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);
// Initialize Microsoft "predefined C++ types".
if (getLangOpts().MSVCCompat) {
if (getLangOpts().CPlusPlus &&
IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
TUScope);
addImplicitTypedef("size_t", Context.getSizeType());
}
// Initialize predefined OpenCL types and supported extensions and (optional)
// core features.
if (getLangOpts().OpenCL) {
getOpenCLOptions().addSupport(
Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
getOpenCLOptions().enableSupportedCore(getLangOpts());
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
if (getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) {
addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
addImplicitTypedef("queue_t", Context.OCLQueueTy);
addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
addImplicitTypedef("atomic_uint",
Context.getAtomicType(Context.UnsignedIntTy));
auto AtomicLongT = Context.getAtomicType(Context.LongTy);
addImplicitTypedef("atomic_long", AtomicLongT);
auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
addImplicitTypedef("atomic_ulong", AtomicULongT);
addImplicitTypedef("atomic_float",
Context.getAtomicType(Context.FloatTy));
auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
addImplicitTypedef("atomic_double", AtomicDoubleT);
// OpenCL C v2.0 s6.13.11.6 requires that atomic_flag be implemented as a
// 32-bit integer, and OpenCL C v2.0 s6.1.1 guarantees that int is 32 bits wide.
addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
addImplicitTypedef("atomic_size_t", AtomicSizeT);
auto AtomicPtrDiffT = Context.getAtomicType(Context.getPointerDiffType());
addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
// OpenCL v2.0 s6.13.11.6:
// - The atomic_long and atomic_ulong types are supported if the
// cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
// extensions are supported.
// - The atomic_double type is only supported if double precision
// is supported and the cl_khr_int64_base_atomics and
// cl_khr_int64_extended_atomics extensions are supported.
// - If the device address space is 64-bits, the data types
// atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
// atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
// cl_khr_int64_extended_atomics extensions are supported.
std::vector<QualType> Atomic64BitTypes;
Atomic64BitTypes.push_back(AtomicLongT);
Atomic64BitTypes.push_back(AtomicULongT);
Atomic64BitTypes.push_back(AtomicDoubleT);
if (Context.getTypeSize(AtomicSizeT) == 64) {
Atomic64BitTypes.push_back(AtomicSizeT);
Atomic64BitTypes.push_back(AtomicIntPtrT);
Atomic64BitTypes.push_back(AtomicUIntPtrT);
Atomic64BitTypes.push_back(AtomicPtrDiffT);
}
for (auto &I : Atomic64BitTypes)
setOpenCLExtensionForType(I,
"cl_khr_int64_base_atomics cl_khr_int64_extended_atomics");
setOpenCLExtensionForType(AtomicDoubleT, "cl_khr_fp64");
}
setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");
#define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext) \
setOpenCLExtensionForType(Context.Id, Ext);
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
addImplicitTypedef(#ExtType, Context.Id##Ty); \
setOpenCLExtensionForType(Context.Id##Ty, #Ext);
#include "clang/Basic/OpenCLExtensionTypes.def"
}
if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
}
if (Context.getTargetInfo().getTriple().isPPC64() &&
Context.getTargetInfo().hasFeature("paired-vector-memops")) {
if (Context.getTargetInfo().hasFeature("mma")) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
}
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
}
if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
}
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
if (IdResolver.begin(MSVaList) == IdResolver.end())
PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
}
DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}
Sema::~Sema() {
assert(InstantiatingSpecializations.empty() &&
"failed to clean up an InstantiatingTemplate?");
if (VisContext) FreeVisContext();
// Kill all the active scopes.
for (sema::FunctionScopeInfo *FSI : FunctionScopes)
delete FSI;
// Tell the SemaConsumer to forget about us; we're going out of scope.
if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
SC->ForgetSema();
// Detach from the external Sema source.
if (ExternalSemaSource *ExternalSema
= dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
ExternalSema->ForgetSema();
// If Sema's ExternalSource is the multiplexer - we own it.
if (isMultiplexExternalSource)
delete ExternalSource;
// Delete cached satisfactions.
std::vector<ConstraintSatisfaction *> Satisfactions;
Satisfactions.reserve(SatisfactionCache.size());
for (auto &Node : SatisfactionCache)
Satisfactions.push_back(&Node);
for (auto *Node : Satisfactions)
delete Node;
threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
// Destroys data sharing attributes stack for OpenMP
DestroyDataSharingAttributesStack();
// Detach from the PP callback handler which outlives Sema since it's owned
// by the preprocessor.
SemaPPCallbackHandler->reset();
}
void Sema::warnStackExhausted(SourceLocation Loc) {
// Only warn about this once.
if (!WarnedStackExhausted) {
Diag(Loc, diag::warn_stack_exhausted);
WarnedStackExhausted = true;
}
}
void Sema::runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn) {
clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}
/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason) {
// If we're not in a function, it's an error.
FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
if (!fn) return false;
// If we're in template instantiation, it's an error.
if (inTemplateInstantiation())
return false;
// If that function's not in a system header, it's an error.
if (!Context.getSourceManager().isInSystemHeader(loc))
return false;
// If the function is already unavailable, it's not an error.
if (fn->hasAttr<UnavailableAttr>()) return true;
fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
return true;
}
ASTMutationListener *Sema::getASTMutationListener() const {
return getASTConsumer().GetASTMutationListener();
}
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
assert(E && "Cannot use with NULL ptr");
if (!ExternalSource) {
ExternalSource = E;
return;
}
if (isMultiplexExternalSource)
static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E);
else {
ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
isMultiplexExternalSource = true;
}
}
/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
llvm::errs() << "\n*** Semantic Analysis Stats:\n";
llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
BumpAlloc.PrintStats();
AnalysisWarnings.PrintStats();
}
void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
QualType SrcType,
SourceLocation Loc) {
Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
*ExprNullability != NullabilityKind::NullableResult))
return;
Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
return;
Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}
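/// Warn when a zero constant is implicitly converted to a null (member)
/// pointer in C++11 or later and suggest 'nullptr' with a fix-it. Conversions
/// whose operand is already of nullptr type, conversions synthesized while
/// rewriting operator<=>, and system-header macros other than NULL are
/// exempt.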
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
E->getBeginLoc()))
return;
// nullptr only exists from C++11 on, so don't warn on its absence earlier.
if (!getLangOpts().CPlusPlus11)
return;
if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
return;
if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
return;
// Don't diagnose the conversion from a 0 literal to a null pointer argument
// in a synthesized call to operator<=>.
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.back().Kind ==
CodeSynthesisContext::RewritingOperatorAsSpaceship)
return;
// If it is a macro from system header, and if the macro name is not "NULL",
// do not warn.
SourceLocation MaybeMacroLoc = E->getBeginLoc();
if (Diags.getSuppressSystemWarnings() &&
SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
!findMacroSpelling(MaybeMacroLoc, "NULL"))
return;
Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
<< FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
CastKind Kind, ExprValueKind VK,
const CXXCastPath *BasePath,
CheckedConversionKind CCK) {
#ifndef NDEBUG
if (VK == VK_RValue && !E->isRValue()) {
switch (Kind) {
default:
llvm_unreachable(("can't implicitly cast lvalue to rvalue with this cast "
"kind: " +
std::string(CastExpr::getCastKindName(Kind)))
.c_str());
case CK_Dependent:
case CK_LValueToRValue:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_ToVoid:
case CK_NonAtomicToAtomic:
break;
}
}
assert((VK == VK_RValue || Kind == CK_Dependent || !E->isRValue()) &&
"can't cast rvalue to lvalue");
#endif
diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
diagnoseZeroToNullptrConversion(Kind, E);
QualType ExprTy = Context.getCanonicalType(E->getType());
QualType TypeTy = Context.getCanonicalType(Ty);
if (ExprTy == TypeTy)
return E;
// C++1z [conv.array]: The temporary materialization conversion is applied.
// We also use this to fuel C++ DR1213, which applies to C++11 onwards.
if (Kind == CK_ArrayToPointerDecay && getLangOpts().CPlusPlus &&
E->getValueKind() == VK_RValue) {
// The temporary is an lvalue in C++98 and an xvalue otherwise.
ExprResult Materialized = CreateMaterializeTemporaryExpr(
E->getType(), E, !getLangOpts().CPlusPlus11);
if (Materialized.isInvalid())
return ExprError();
E = Materialized.get();
}
if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
ImpCast->setType(Ty);
ImpCast->setValueKind(VK);
return E;
}
}
return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
CurFPFeatureOverrides());
}
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
switch (ScalarTy->getScalarTypeKind()) {
case Type::STK_Bool: return CK_NoOp;
case Type::STK_CPointer: return CK_PointerToBoolean;
case Type::STK_BlockPointer: return CK_PointerToBoolean;
case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
case Type::STK_Integral: return CK_IntegralToBoolean;
case Type::STK_Floating: return CK_FloatingToBoolean;
case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
}
llvm_unreachable("unknown scalar type kind");
}
/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
if (D->getMostRecentDecl()->isUsed())
return true;
if (D->isExternallyVisible())
return true;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If this is a function template and none of its specializations is used,
// we should warn.
if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
for (const auto *Spec : Template->specializations())
if (ShouldRemoveFromUnused(SemaRef, Spec))
return true;
// UnusedFileScopedDecls stores the first declaration.
// The declaration may have become definition so check again.
const FunctionDecl *DeclToCheck;
if (FD->hasBody(DeclToCheck))
return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
// Later redecls may add new information resulting in not having to warn,
// so check again.
DeclToCheck = FD->getMostRecentDecl();
if (DeclToCheck != FD)
return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
}
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// If a variable usable in constant expressions is referenced,
// don't warn if it isn't used: if the value of a variable is required
// for the computation of a constant expression, it doesn't make sense to
// warn even if the variable isn't odr-used. (isReferenced doesn't
// precisely reflect that, but it's a decent approximation.)
if (VD->isReferenced() &&
VD->mightBeUsableInConstantExpressions(SemaRef->Context))
return true;
if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
// If this is a variable template and none of its specializations is used,
// we should warn.
for (const auto *Spec : Template->specializations())
if (ShouldRemoveFromUnused(SemaRef, Spec))
return true;
// UnusedFileScopedDecls stores the first declaration.
// The declaration may have become definition so check again.
const VarDecl *DeclToCheck = VD->getDefinition();
if (DeclToCheck)
return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
// Later redecls may add new information resulting in not having to warn,
// so check again.
DeclToCheck = VD->getMostRecentDecl();
if (DeclToCheck != VD)
return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
}
return false;
}
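/// Returns true if \p ND is a function or variable declared with C language
/// linkage.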
static bool isFunctionOrVarDeclExternC(NamedDecl *ND) {
if (auto *FD = dyn_cast<FunctionDecl>(ND))
return FD->isExternC();
return cast<VarDecl>(ND)->isExternC();
}
/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) {
// Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
// because we also want to catch the case where its type has VisibleNoLinkage,
// which does not affect the linkage of VD.
return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
!isExternalFormalLinkage(VD->getType()->getLinkage()) &&
!isFunctionOrVarDeclExternC(VD);
}
/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
void Sema::getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
for (const auto &UndefinedUse : UndefinedButUsed) {
NamedDecl *ND = UndefinedUse.first;
// Ignore declarations that have become invalid.
if (ND->isInvalidDecl()) continue;
// __attribute__((weakref)) is basically a definition.
if (ND->hasAttr<WeakRefAttr>()) continue;
if (isa<CXXDeductionGuideDecl>(ND))
continue;
if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
// An exported function will always be emitted when defined, so even if
// the function is inline, it doesn't have to be emitted in this TU. An
// imported function implies that it has been exported somewhere else.
continue;
}
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
if (FD->isDefined())
continue;
if (FD->isExternallyVisible() &&
!isExternalWithNoLinkageType(FD) &&
!FD->getMostRecentDecl()->isInlined() &&
!FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
if (FD->getBuiltinID())
continue;
} else {
auto *VD = cast<VarDecl>(ND);
if (VD->hasDefinition() != VarDecl::DeclarationOnly)
continue;
if (VD->isExternallyVisible() &&
!isExternalWithNoLinkageType(VD) &&
!VD->getMostRecentDecl()->isInline() &&
!VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
// Skip VarDecls that lack formal definitions but which we know are in
// fact defined somewhere.
if (VD->isKnownToBeDefined())
continue;
}
Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
}
}
/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
if (S.UndefinedButUsed.empty()) return;
// Collect all the still-undefined entities with internal linkage.
SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
S.getUndefinedButUsed(Undefined);
if (Undefined.empty()) return;
for (auto Undef : Undefined) {
ValueDecl *VD = cast<ValueDecl>(Undef.first);
SourceLocation UseLoc = Undef.second;
if (S.isExternalWithNoLinkageType(VD)) {
// C++ [basic.link]p8:
// A type without linkage shall not be used as the type of a variable
// or function with external linkage unless
// -- the entity has C language linkage
// -- the entity is not odr-used or is defined in the same TU
//
// As an extension, accept this in cases where the type is externally
// visible, since the function or variable actually can be defined in
// another translation unit in that case.
S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
? diag::ext_undefined_internal_type
: diag::err_undefined_internal_type)
<< isa<VarDecl>(VD) << VD;
} else if (!VD->isExternallyVisible()) {
// FIXME: We can promote this to an error. The function or variable can't
// be defined anywhere else, so the program must necessarily violate the
// one definition rule.
S.Diag(VD->getLocation(), diag::warn_undefined_internal)
<< isa<VarDecl>(VD) << VD;
} else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
(void)FD;
assert(FD->getMostRecentDecl()->isInlined() &&
"used object requires definition but isn't inline or internal?");
// FIXME: This is ill-formed; we should reject.
S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
} else {
assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
"used var requires definition but isn't inline or internal?");
S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
}
if (UseLoc.isValid())
S.Diag(UseLoc, diag::note_used_here);
}
S.UndefinedButUsed.clear();
}
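/// Read any #pragma weak identifiers recorded by an external Sema source
/// (such as a PCH or module) into WeakUndeclaredIdentifiers so they can be
/// checked together with those seen in this translation unit.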
void Sema::LoadExternalWeakUndeclaredIdentifiers() {
if (!ExternalSource)
return;
SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
for (auto &WeakID : WeakIDs)
WeakUndeclaredIdentifiers.insert(WeakID);
}
typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
/// Returns true, if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
RecordCompleteMap &MNCComplete) {
RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
if (Cache != MNCComplete.end())
return Cache->second;
if (!RD->isCompleteDefinition())
return false;
bool Complete = true;
for (DeclContext::decl_iterator I = RD->decls_begin(),
E = RD->decls_end();
I != E && Complete; ++I) {
if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
Complete = M->isDefined() || M->isDefaulted() ||
(M->isPure() && !isa<CXXDestructorDecl>(M));
else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
// If the template function is marked as late template parsed at this
// point, it has not been instantiated and therefore we have not
// performed semantic analysis on it yet, so we cannot know if the type
// can be considered complete.
Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
F->getTemplatedDecl()->isDefined();
else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
if (R->isInjectedClassName())
continue;
if (R->hasDefinition())
Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
MNCComplete);
else
Complete = false;
}
}
MNCComplete[RD] = Complete;
return Complete;
}
/// Returns true, if the given CXXRecordDecl is fully defined in this
/// translation unit, i.e. all methods are defined or pure virtual and all
/// friends, friend functions and nested classes are fully defined in this
/// translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
RecordCompleteMap &RecordsComplete,
RecordCompleteMap &MNCComplete) {
RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
if (Cache != RecordsComplete.end())
return Cache->second;
bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
E = RD->friend_end();
I != E && Complete; ++I) {
// Check if friend classes and methods are complete.
if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
// Friend classes are available as the TypeSourceInfo of the FriendDecl.
if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
else
Complete = false;
} else {
// Friend functions are available through the NamedDecl of FriendDecl.
if (const FunctionDecl *FD =
dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
Complete = FD->isDefined();
else
// This is a template friend, give up.
Complete = false;
}
}
RecordsComplete[RD] = Complete;
return Complete;
}
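/// Emit -Wunused-local-typedef for local typedef and alias declarations that
/// were never referenced (including candidates read from an external source),
/// then clear the candidate list.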
void Sema::emitAndClearUnusedLocalTypedefWarnings() {
if (ExternalSource)
ExternalSource->ReadUnusedLocalTypedefNameCandidates(
UnusedLocalTypedefNameCandidates);
for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
if (TD->isReferenced())
continue;
Diag(TD->getLocation(), diag::warn_unused_local_typedef)
<< isa<TypeAliasDecl>(TD) << TD->getDeclName();
}
UnusedLocalTypedefNameCandidates.clear();
}
/// This is called before the very first declaration in the translation unit
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
if (getLangOpts().ModulesTS &&
(getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface ||
getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
// We start in an implied global module fragment.
SourceLocation StartOfTU =
SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
ActOnGlobalModuleFragmentDecl(StartOfTU);
ModuleScopes.back().ImplicitGlobalModuleFragment = true;
}
}
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
// No explicit actions are required at the end of the global module fragment.
if (Kind == TUFragmentKind::Global)
return;
// Transfer late parsed template instantiations over to the pending template
// instantiation list. During normal compilation, the late template parser
// will be installed and instantiating these templates will succeed.
//
// If we are building a TU prefix for serialization, it is also safe to
// transfer these over, even though they are not parsed. The end of the TU
// should be outside of any eager template instantiation scope, so when this
// AST is deserialized, these templates will not be parsed until the end of
// the combined TU.
PendingInstantiations.insert(PendingInstantiations.end(),
LateParsedInstantiations.begin(),
LateParsedInstantiations.end());
LateParsedInstantiations.clear();
// If DefineUsedVTables ends up marking any virtual member functions it
// might lead to more pending template instantiations, which we then need
// to instantiate.
DefineUsedVTables();
// C++: Perform implicit template instantiations.
//
// FIXME: When we perform these implicit instantiations, we do not
// carefully keep track of the point of instantiation (C++ [temp.point]).
// This means that name lookup that occurs within the template
// instantiation will always happen at the end of the translation unit,
// so it will find some names that are not required to be found. This is
// valid, but we could do better by diagnosing if an instantiation uses a
// name that was not visible at its first point of instantiation.
if (ExternalSource) {
// Load pending instantiations from the external source.
SmallVector<PendingImplicitInstantiation, 4> Pending;
ExternalSource->ReadPendingInstantiations(Pending);
for (auto PII : Pending)
if (auto Func = dyn_cast<FunctionDecl>(PII.first))
Func->setInstantiationIsPending(true);
PendingInstantiations.insert(PendingInstantiations.begin(),
Pending.begin(), Pending.end());
}
{
llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
PerformPendingInstantiations();
}
emitDeferredDiags();
assert(LateParsedInstantiations.empty() &&
"end of TU template instantiation should not create more "
"late-parsed templates");
// Report diagnostics for uncorrected delayed typos. Ideally all of them
// should have been corrected by that time, but it is very hard to cover all
// cases in practice.
for (const auto &Typo : DelayedTypos) {
// We pass an empty TypoCorrection to indicate no correction was performed.
Typo.second.DiagHandler(TypoCorrection());
}
DelayedTypos.clear();
}
/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
assert(DelayedDiagnostics.getCurrentPool() == nullptr
&& "reached end of translation unit with a pool attached?");
// If code completion is enabled, don't perform any end-of-translation-unit
// work.
if (PP.isCodeCompletionEnabled())
return;
// Complete translation units and modules define vtables and perform implicit
// instantiations. PCH files do not.
if (TUKind != TU_Prefix) {
DiagnoseUseOfUnimplementedSelectors();
ActOnEndOfTranslationUnitFragment(
!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
Module::PrivateModuleFragment
? TUFragmentKind::Private
: TUFragmentKind::Normal);
if (LateTemplateParserCleanup)
LateTemplateParserCleanup(OpaqueParser);
CheckDelayedMemberExceptionSpecs();
} else {
// If we are building a TU prefix for serialization, it is safe to transfer
// these over, even though they are not parsed. The end of the TU should be
// outside of any eager template instantiation scope, so when this AST is
// deserialized, these templates will not be parsed until the end of the
// combined TU.
PendingInstantiations.insert(PendingInstantiations.end(),
LateParsedInstantiations.begin(),
LateParsedInstantiations.end());
LateParsedInstantiations.clear();
if (LangOpts.PCHInstantiateTemplates) {
llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
PerformPendingInstantiations();
}
}
DiagnoseUnterminatedPragmaAlignPack();
DiagnoseUnterminatedPragmaAttribute();
// All delayed member exception specs should be checked or we end up accepting
// incompatible declarations.
assert(DelayedOverridingExceptionSpecChecks.empty());
assert(DelayedEquivalentExceptionSpecChecks.empty());
// All dllexport classes should have been processed already.
assert(DelayedDllExportClasses.empty());
assert(DelayedDllExportMemberFunctions.empty());
// Remove file scoped decls that turned out to be used.
UnusedFileScopedDecls.erase(
std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
UnusedFileScopedDecls.end(),
[this](const DeclaratorDecl *DD) {
return ShouldRemoveFromUnused(this, DD);
}),
UnusedFileScopedDecls.end());
if (TUKind == TU_Prefix) {
// Translation unit prefixes don't need any of the checking below.
if (!PP.isIncrementalProcessingEnabled())
TUScope = nullptr;
return;
}
// Check for #pragma weak identifiers that were never declared
LoadExternalWeakUndeclaredIdentifiers();
for (auto WeakID : WeakUndeclaredIdentifiers) {
if (WeakID.second.getUsed())
continue;
Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(),
LookupOrdinaryName);
if (PrevDecl != nullptr &&
!(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type)
<< "'weak'" << ExpectedVariableOrFunction;
else
Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
<< WeakID.first;
}
if (LangOpts.CPlusPlus11 &&
!Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
CheckDelegatingCtorCycles();
if (!Diags.hasErrorOccurred()) {
if (ExternalSource)
ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
checkUndefinedButUsed(*this);
}
// A global-module-fragment is only permitted within a module unit.
bool DiagnosedMissingModuleDeclaration = false;
if (!ModuleScopes.empty() &&
ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
!ModuleScopes.back().ImplicitGlobalModuleFragment) {
Diag(ModuleScopes.back().BeginLoc,
diag::err_module_declaration_missing_after_global_module_introducer);
DiagnosedMissingModuleDeclaration = true;
}
if (TUKind == TU_Module) {
// If we are building a module interface unit, we need to have seen the
// module declaration by now.
if (getLangOpts().getCompilingModule() ==
LangOptions::CMK_ModuleInterface &&
(ModuleScopes.empty() ||
!ModuleScopes.back().Module->isModulePurview()) &&
!DiagnosedMissingModuleDeclaration) {
// FIXME: Make a better guess as to where to put the module declaration.
Diag(getSourceManager().getLocForStartOfFile(
getSourceManager().getMainFileID()),
diag::err_module_declaration_missing);
}
// If we are building a module, resolve all of the exported declarations
// now.
if (Module *CurrentModule = PP.getCurrentModule()) {
ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
SmallVector<Module *, 2> Stack;
Stack.push_back(CurrentModule);
while (!Stack.empty()) {
Module *Mod = Stack.pop_back_val();
// Resolve the exported declarations and conflicts.
// FIXME: Actually complain, once we figure out how to teach the
// diagnostic client to deal with complaints in the module map at this
// point.
ModMap.resolveExports(Mod, /*Complain=*/false);
ModMap.resolveUses(Mod, /*Complain=*/false);
ModMap.resolveConflicts(Mod, /*Complain=*/false);
// Queue the submodules, so their exports will also be resolved.
Stack.append(Mod->submodule_begin(), Mod->submodule_end());
}
}
// Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
// modules when they are built, not every time they are used.
emitAndClearUnusedLocalTypedefWarnings();
}
// C99 6.9.2p2:
// A declaration of an identifier for an object that has file
// scope without an initializer, and without a storage-class
// specifier or with the storage-class specifier static,
// constitutes a tentative definition. If a translation unit
// contains one or more tentative definitions for an identifier,
// and the translation unit contains no external definition for
// that identifier, then the behavior is exactly as if the
// translation unit contains a file scope declaration of that
// identifier, with the composite type as of the end of the
// translation unit, with an initializer equal to 0.
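  //
  // A minimal illustration of the rule above (a sketch; 'n' is a hypothetical
  // variable, not from this file):
  //   int n[];        // tentative definition with an incomplete array type
  // With no other definition of 'n' in the TU, the program behaves as if it
  // contained
  //   int n[1] = {0}; // array length completed to 1 (C99 6.9.2p5), zero value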
llvm::SmallSet<VarDecl *, 32> Seen;
for (TentativeDefinitionsType::iterator
T = TentativeDefinitions.begin(ExternalSource),
TEnd = TentativeDefinitions.end();
T != TEnd; ++T) {
VarDecl *VD = (*T)->getActingDefinition();
// If the tentative definition was completed, getActingDefinition() returns
// null. If we've already seen this variable before, insert()'s second
// return value is false.
if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
continue;
if (const IncompleteArrayType *ArrayT
= Context.getAsIncompleteArrayType(VD->getType())) {
// Set the length of the array to 1 (C99 6.9.2p5).
Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One,
nullptr, ArrayType::Normal, 0);
VD->setType(T);
} else if (RequireCompleteType(VD->getLocation(), VD->getType(),
diag::err_tentative_def_incomplete_type))
VD->setInvalidDecl();
// No initialization is performed for a tentative definition.
CheckCompleteVariableDeclaration(VD);
// Notify the consumer that we've completed a tentative definition.
if (!VD->isInvalidDecl())
Consumer.CompleteTentativeDefinition(VD);
}
for (auto D : ExternalDeclarations) {
if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
continue;
Consumer.CompleteExternalDeclaration(D);
}
// If there were errors, disable 'unused' warnings since they will mostly be
// noise. Don't warn for a use from a module: either we should warn on all
// file-scope declarations in modules or not at all, but whether the
// declaration is used is immaterial.
if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
// Output warning for unused file scoped decls.
for (UnusedFileScopedDeclsType::iterator
I = UnusedFileScopedDecls.begin(ExternalSource),
E = UnusedFileScopedDecls.end(); I != E; ++I) {
if (ShouldRemoveFromUnused(this, *I))
continue;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
const FunctionDecl *DiagD;
if (!FD->hasBody(DiagD))
DiagD = FD;
if (DiagD->isDeleted())
continue; // Deleted functions are supposed to be unused.
if (DiagD->isReferenced()) {
if (isa<CXXMethodDecl>(DiagD))
Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
<< DiagD;
else {
if (FD->getStorageClass() == SC_Static &&
!FD->isInlineSpecified() &&
!SourceMgr.isInMainFile(
SourceMgr.getExpansionLoc(FD->getLocation())))
Diag(DiagD->getLocation(),
diag::warn_unneeded_static_internal_decl)
<< DiagD;
else
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
<< /*function*/ 0 << DiagD;
}
} else {
if (FD->getDescribedFunctionTemplate())
Diag(DiagD->getLocation(), diag::warn_unused_template)
<< /*function*/ 0 << DiagD;
else
Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
? diag::warn_unused_member_function
: diag::warn_unused_function)
<< DiagD;
}
} else {
const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
if (!DiagD)
DiagD = cast<VarDecl>(*I);
if (DiagD->isReferenced()) {
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
<< /*variable*/ 1 << DiagD;
} else if (DiagD->getType().isConstQualified()) {
const SourceManager &SM = SourceMgr;
if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
!PP.getLangOpts().IsHeaderFile)
Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
<< DiagD;
} else {
if (DiagD->getDescribedVarTemplate())
Diag(DiagD->getLocation(), diag::warn_unused_template)
<< /*variable*/ 1 << DiagD;
else
Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD;
}
}
}
emitAndClearUnusedLocalTypedefWarnings();
}
if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
// FIXME: Load additional unused private field candidates from the external
// source.
RecordCompleteMap RecordsComplete;
RecordCompleteMap MNCComplete;
for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
E = UnusedPrivateFields.end(); I != E; ++I) {
const NamedDecl *D = *I;
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
if (RD && !RD->isUnion() &&
IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
Diag(D->getLocation(), diag::warn_unused_private_field)
<< D->getDeclName();
}
}
}
if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
if (ExternalSource)
ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
for (const auto &DeletedFieldInfo : DeleteExprs) {
for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
DeleteExprLoc.second);
}
}
}
// Check we've noticed that we're no longer parsing the initializer for every
// variable. If we miss cases, then at best we have a performance issue and
// at worst a rejects-valid bug.
assert(ParsingInitForAutoVars.empty() &&
"Didn't unmark var as having its initializer parsed");
if (!PP.isIncrementalProcessingEnabled())
TUScope = nullptr;
}
//===----------------------------------------------------------------------===//
// Helper functions.
//===----------------------------------------------------------------------===//
DeclContext *Sema::getFunctionLevelDeclContext() {
DeclContext *DC = CurContext;
while (true) {
if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
isa<RequiresExprBodyDecl>(DC)) {
DC = DC->getParent();
} else if (isa<CXXMethodDecl>(DC) &&
cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
DC = DC->getParent()->getParent();
}
else break;
}
return DC;
}
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *Sema::getCurFunctionDecl() {
DeclContext *DC = getFunctionLevelDeclContext();
return dyn_cast<FunctionDecl>(DC);
}
ObjCMethodDecl *Sema::getCurMethodDecl() {
DeclContext *DC = getFunctionLevelDeclContext();
while (isa<RecordDecl>(DC))
DC = DC->getParent();
return dyn_cast<ObjCMethodDecl>(DC);
}
NamedDecl *Sema::getCurFunctionOrMethodDecl() {
DeclContext *DC = getFunctionLevelDeclContext();
if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
return cast<NamedDecl>(DC);
return nullptr;
}
LangAS Sema::getDefaultCXXMethodAddrSpace() const {
if (getLangOpts().OpenCL)
return LangAS::opencl_generic;
return LangAS::Default;
}
void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
// FIXME: It doesn't make sense to me that DiagID is an incoming argument here
// and yet we also use the current diag ID on the DiagnosticsEngine. This has
// been made more painfully obvious by the refactor that introduced this
// function, but it is possible that the incoming argument can be
// eliminated. If it truly cannot be (for example, there is some reentrancy
// issue I am not seeing yet), then there should at least be a clarifying
// comment somewhere.
if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) {
switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
Diags.getCurrentDiagID())) {
case DiagnosticIDs::SFINAE_Report:
// We'll report the diagnostic below.
break;
case DiagnosticIDs::SFINAE_SubstitutionFailure:
// Count this failure so that we know that template argument deduction
// has failed.
++NumSFINAEErrors;
// Make a copy of this suppressed diagnostic and store it with the
// template-deduction information.
if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
Diagnostic DiagInfo(&Diags);
(*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
}
Diags.setLastDiagnosticIgnored(true);
Diags.Clear();
return;
case DiagnosticIDs::SFINAE_AccessControl: {
// Per C++ Core Issue 1170, access control is part of SFINAE.
// Additionally, the AccessCheckingSFINAE flag can be used to temporarily
// make access control a part of SFINAE for the purposes of checking
// type traits.
if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
break;
SourceLocation Loc = Diags.getCurrentDiagLoc();
// Suppress this diagnostic.
++NumSFINAEErrors;
// Make a copy of this suppressed diagnostic and store it with the
// template-deduction information.
if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
Diagnostic DiagInfo(&Diags);
(*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
}
Diags.setLastDiagnosticIgnored(true);
Diags.Clear();
// Now the diagnostic state is clear, produce a C++98 compatibility
// warning.
Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
// The last diagnostic which Sema produced was ignored. Suppress any
// notes attached to it.
Diags.setLastDiagnosticIgnored(true);
return;
}
case DiagnosticIDs::SFINAE_Suppress:
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
if (*Info) {
Diagnostic DiagInfo(&Diags);
(*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
}
// Suppress this diagnostic.
Diags.setLastDiagnosticIgnored(true);
Diags.Clear();
return;
}
}
// Copy the diagnostic printing policy over the ASTContext printing policy.
// TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
Context.setPrintingPolicy(getPrintingPolicy());
// Emit the diagnostic.
if (!Diags.EmitCurrentDiagnostic())
return;
// If this is not a note, and we're in a template instantiation
// that is different from the last template instantiation where
// we emitted an error, print a template instantiation
// backtrace.
if (!DiagnosticIDs::isBuiltinNote(DiagID))
PrintContextStack();
}
Sema::SemaDiagnosticBuilder
Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
}
bool Sema::hasUncompilableErrorOccurred() const {
if (getDiagnostics().hasUncompilableErrorOccurred())
return true;
auto *FD = dyn_cast<FunctionDecl>(CurContext);
if (!FD)
return false;
auto Loc = DeviceDeferredDiags.find(FD);
if (Loc == DeviceDeferredDiags.end())
return false;
for (auto PDAt : Loc->second) {
if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
return true;
}
return false;
}
// Print notes showing how we can reach FD starting from an a priori
// known-callable function.
static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
auto FnIt = S.DeviceKnownEmittedFns.find(FD);
while (FnIt != S.DeviceKnownEmittedFns.end()) {
// Respect error limit.
if (S.Diags.hasFatalErrorOccurred())
return;
DiagnosticBuilder Builder(
S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
Builder << FnIt->second.FD;
FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
}
}
namespace {
/// Helper class that emits deferred diagnostic messages if an entity directly
/// or indirectly using the function that causes the deferred diagnostic
/// messages is known to be emitted.
///
/// During parsing of the AST, certain diagnostic messages are recorded as
/// deferred diagnostics, since it is unknown whether the functions containing
/// such diagnostics will be emitted. A list of potentially emitted functions,
/// and of variables that may trigger emission of functions, is also recorded.
/// DeferredDiagnosticsEmitter recursively visits the functions used by each
/// function in order to emit the deferred diagnostics.
///
/// During the visit, certain OpenMP directives, or the initializers of
/// variables with certain OpenMP attributes, cause subsequent visits of any
/// functions to enter a state that this implementation calls the OpenMP
/// device context. The state is exited when the directive or initializer is
/// exited. This state can change the emission states of subsequent uses
/// of functions.
///
/// Conceptually the functions or variables to be visited form a use graph
/// where the parent node uses the child node. At any point of the visit,
/// the nodes traversed from the root to the current node form a use
/// stack. The emission state of the current node depends on two factors:
/// 1. the emission state of the root node
/// 2. whether the current node is in OpenMP device context
/// If the function is decided to be emitted, its contained deferred diagnostics
/// are emitted, together with the information about the use stack.
///
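/// A hedged illustration (hypothetical functions h, f and g, not taken from
/// this comment): if a known-emitted host function h() contains an OpenMP
/// 'target' region that calls f(), and f() calls g() which carries a deferred
/// diagnostic, then visiting h() enters the device context, the visit recurses
/// through the use graph to g(), and g()'s deferred diagnostic is emitted
/// together with notes describing the h() -> f() -> g() use stack.
///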
class DeferredDiagnosticsEmitter
: public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
// Whether the function is already in the current use-path.
llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
// The current use-path.
llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
// Whether the visiting of the function has been done. Done[0] is for the
// case not in OpenMP device context. Done[1] is for the case in OpenMP
// device context. We need two sets because diagnostics emission may be
// different depending on whether it is in OpenMP device context.
llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
// Emission state of the root node of the current use graph.
bool ShouldEmitRootNode;
// Current OpenMP device context level. It is initialized to 0 and each
// entering of device context increases it by 1 and each exit decreases
// it by 1. Non-zero value indicates it is currently in device context.
unsigned InOMPDeviceContext;
DeferredDiagnosticsEmitter(Sema &S)
: Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
void VisitOMPTargetDirective(OMPTargetDirective *Node) {
++InOMPDeviceContext;
Inherited::VisitOMPTargetDirective(Node);
--InOMPDeviceContext;
}
void visitUsedDecl(SourceLocation Loc, Decl *D) {
if (isa<VarDecl>(D))
return;
if (auto *FD = dyn_cast<FunctionDecl>(D))
checkFunc(Loc, FD);
else
Inherited::visitUsedDecl(Loc, D);
}
void checkVar(VarDecl *VD) {
assert(VD->isFileVarDecl() &&
"Should only check file-scope variables");
if (auto *Init = VD->getInit()) {
auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
*DevTy == OMPDeclareTargetDeclAttr::DT_Any);
if (IsDev)
++InOMPDeviceContext;
this->Visit(Init);
if (IsDev)
--InOMPDeviceContext;
}
}
void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
return;
// Finalize analysis of OpenMP-specific constructs.
if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
(ShouldEmitRootNode || InOMPDeviceContext))
S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
if (Caller)
S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
// Always emit deferred diagnostics for the direct users. This does not
// lead to explosion of diagnostics since each user is visited at most
// twice.
if (ShouldEmitRootNode || InOMPDeviceContext)
emitDeferredDiags(FD, Caller);
// Do not revisit a function if the function body has been completely
// visited before.
if (!Done.insert(FD).second)
return;
InUsePath.insert(FD);
UsePath.push_back(FD);
if (auto *S = FD->getBody()) {
this->Visit(S);
}
UsePath.pop_back();
InUsePath.erase(FD);
}
void checkRecordedDecl(Decl *D) {
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
Sema::FunctionEmissionStatus::Emitted;
checkFunc(SourceLocation(), FD);
} else
checkVar(cast<VarDecl>(D));
}
// Emit any deferred diagnostics for FD
void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
auto It = S.DeviceDeferredDiags.find(FD);
if (It == S.DeviceDeferredDiags.end())
return;
bool HasWarningOrError = false;
bool FirstDiag = true;
for (PartialDiagnosticAt &PDAt : It->second) {
// Respect error limit.
if (S.Diags.hasFatalErrorOccurred())
return;
const SourceLocation &Loc = PDAt.first;
const PartialDiagnostic &PD = PDAt.second;
HasWarningOrError |=
S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
DiagnosticsEngine::Warning;
{
DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
PD.Emit(Builder);
}
      // Emit the note on the first diagnostic, in case too many diagnostics
      // would otherwise cause the note not to be emitted.
if (FirstDiag && HasWarningOrError && ShowCallStack) {
emitCallStackNotes(S, FD);
FirstDiag = false;
}
}
}
};
} // namespace
void Sema::emitDeferredDiags() {
if (ExternalSource)
ExternalSource->ReadDeclsToCheckForDeferredDiags(
DeclsToCheckForDeferredDiags);
if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
DeclsToCheckForDeferredDiags.empty())
return;
DeferredDiagnosticsEmitter DDE(*this);
for (auto D : DeclsToCheckForDeferredDiags)
DDE.checkRecordedDecl(D);
}
// In CUDA, there are some constructs which may appear in semantically-valid
// code, but trigger errors if we ever generate code for the function in which
// they appear. Essentially every construct you're not allowed to use on the
// device falls into this category, because you are allowed to use these
// constructs in a __host__ __device__ function, but only if that function is
// never codegen'ed on the device.
//
// To handle semantic checking for these constructs, we keep track of the set of
// functions we know will be emitted, either because we could tell a priori that
// they would be emitted, or because they were transitively called by a
// known-emitted function.
//
// We also keep a partial call graph of which not-known-emitted functions call
// which other not-known-emitted functions.
//
// When we see something which is illegal if the current function is emitted
// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
// CheckCUDACall), we first check if the current function is known-emitted. If
// so, we immediately output the diagnostic.
//
// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
// until we discover that the function is known-emitted, at which point we take
// it out of this map and emit the diagnostic.
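//
// A hedged sketch of that flow (hypothetical CUDA functions, not from this
// file):
//
//   __host__ __device__ void hd() { throw 0; } // 'throw' is not supported in
//                                               // device code
//   __global__ void kernel() { hd(); }          // makes hd() known-emitted on
//                                               // the device
//
// The 'throw' in hd() is recorded as a deferred diagnostic when hd() is first
// seen; once the call from kernel() makes hd() known-emitted for the device,
// the diagnostic is emitted along with call-stack notes showing how hd() was
// reached.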
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
unsigned DiagID,
FunctionDecl *Fn, Sema &S)
: S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
switch (K) {
case K_Nop:
break;
case K_Immediate:
case K_ImmediateWithCallStack:
ImmediateDiag.emplace(
ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
break;
case K_Deferred:
assert(Fn && "Must have a function to attach the deferred diag to.");
auto &Diags = S.DeviceDeferredDiags[Fn];
PartialDiagId.emplace(Diags.size());
Diags.emplace_back(Loc, S.PDiag(DiagID));
break;
}
}
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
: S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
PartialDiagId(D.PartialDiagId) {
// Clean the previous diagnostics.
D.ShowCallStack = false;
D.ImmediateDiag.reset();
D.PartialDiagId.reset();
}
Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
if (ImmediateDiag) {
// Emit our diagnostic and, if it was a warning or error, output a callstack
// if Fn isn't a priori known-emitted.
bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
DiagID, Loc) >= DiagnosticsEngine::Warning;
ImmediateDiag.reset(); // Emit the immediate diag.
if (IsWarningOrError && ShowCallStack)
emitCallStackNotes(S, Fn);
} else {
assert((!PartialDiagId || ShowCallStack) &&
"Must always show call stack for deferred diags.");
}
}
Sema::SemaDiagnosticBuilder
Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
FD = FD ? FD : getCurFunctionDecl();
if (LangOpts.OpenMP)
return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
: diagIfOpenMPHostCode(Loc, DiagID, FD);
if (getLangOpts().CUDA)
return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
: CUDADiagIfHostCode(Loc, DiagID);
if (getLangOpts().SYCLIsDevice)
return SYCLDiagIfDeviceCode(Loc, DiagID);
return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
FD, *this);
}
Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint) {
bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
DiagnosticIDs::isDeferrable(DiagID) &&
(DeferHint || !IsError);
auto SetIsLastErrorImmediate = [&](bool Flag) {
if (IsError)
IsLastErrorImmediate = Flag;
};
if (!ShouldDefer) {
SetIsLastErrorImmediate(true);
return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
DiagID, getCurFunctionDecl(), *this);
}
SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
? CUDADiagIfDeviceCode(Loc, DiagID)
: CUDADiagIfHostCode(Loc, DiagID);
SetIsLastErrorImmediate(DB.isImmediate());
return DB;
}
void Sema::checkDeviceDecl(ValueDecl *D, SourceLocation Loc) {
if (isUnevaluatedContext())
return;
Decl *C = cast<Decl>(getCurLexicalContext());
// Memcpy operations for structs containing a member with unsupported type
// are ok, though.
if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
MD->isTrivial())
return;
if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
return;
}
// Try to associate errors with the lexical context, if that is a function, or
// the value declaration otherwise.
FunctionDecl *FD =
isa<FunctionDecl>(C) ? cast<FunctionDecl>(C) : dyn_cast<FunctionDecl>(D);
auto CheckType = [&](QualType Ty) {
if (Ty->isDependentType())
return;
if (Ty->isExtIntType()) {
if (!Context.getTargetInfo().hasExtIntType()) {
targetDiag(Loc, diag::err_device_unsupported_type, FD)
<< D << false /*show bit size*/ << 0 /*bitsize*/
<< Ty << Context.getTargetInfo().getTriple().str();
}
return;
}
if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
((Ty->isFloat128Type() ||
(Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
!Context.getTargetInfo().hasFloat128Type()) ||
(Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
!Context.getTargetInfo().hasInt128Type())) {
if (targetDiag(Loc, diag::err_device_unsupported_type, FD)
<< D << true /*show bit size*/
<< static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
<< Context.getTargetInfo().getTriple().str())
D->setInvalidDecl();
targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
}
};
QualType Ty = D->getType();
CheckType(Ty);
if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
for (const auto &ParamTy : FPTy->param_types())
CheckType(ParamTy);
CheckType(FPTy->getReturnType());
}
if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
CheckType(FNPTy->getReturnType());
}
/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
/// expansion loc.
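/// A hedged usage sketch (hypothetical macro name, not taken from this file):
/// for a location inside the expansion of a macro such as MY_DEPRECATED, a
/// caller can pass name == "MY_DEPRECATED"; if the expansion location is
/// spelled with that name, locref is rewritten to point at the macro use,
/// which typically reads better in diagnostics.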
bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
SourceLocation loc = locref;
if (!loc.isMacroID()) return false;
// There's no good way right now to look at the intermediate
// expansions, so just jump to the expansion location.
loc = getSourceManager().getExpansionLoc(loc);
// If that's written with the name, stop here.
SmallString<16> buffer;
if (getPreprocessor().getSpelling(loc, buffer) == name) {
locref = loc;
return true;
}
return false;
}
/// Determines the active Scope associated with the given declaration
/// context.
///
/// This routine maps a declaration context to the active Scope object that
/// represents that declaration context in the parser. It is typically used
/// from "scope-less" code (e.g., template instantiation, lazy creation of
/// declarations) that injects a name for name-lookup purposes and, therefore,
/// must update the Scope.
///
/// \returns The scope corresponding to the given declaration context, or NULL
/// if no such scope is open.
Scope *Sema::getScopeForContext(DeclContext *Ctx) {
if (!Ctx)
return nullptr;
Ctx = Ctx->getPrimaryContext();
for (Scope *S = getCurScope(); S; S = S->getParent()) {
// Ignore scopes that cannot have declarations. This is important for
// out-of-line definitions of static class members.
if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
if (DeclContext *Entity = S->getEntity())
if (Ctx == Entity->getPrimaryContext())
return S;
}
return nullptr;
}
/// Enter a new function scope
void Sema::PushFunctionScope() {
if (FunctionScopes.empty() && CachedFunctionScope) {
// Use CachedFunctionScope to avoid allocating memory when possible.
CachedFunctionScope->Clear();
FunctionScopes.push_back(CachedFunctionScope.release());
} else {
FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
}
if (LangOpts.OpenMP)
pushOpenMPFunctionRegion();
}
void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
BlockScope, Block));
}
LambdaScopeInfo *Sema::PushLambdaScope() {
LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
FunctionScopes.push_back(LSI);
return LSI;
}
void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
if (LambdaScopeInfo *const LSI = getCurLambda()) {
LSI->AutoTemplateParameterDepth = Depth;
return;
}
llvm_unreachable(
"Remove assertion if intentionally called in a non-lambda context.");
}
// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
static void checkEscapingByref(VarDecl *VD, Sema &S) {
QualType T = VD->getType();
EnterExpressionEvaluationContext scope(
S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
SourceLocation Loc = VD->getLocation();
Expr *VarRef =
new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
ExprResult Result = S.PerformMoveOrCopyInitialization(
InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(),
VarRef, /*AllowNRVO=*/true);
if (!Result.isInvalid()) {
Result = S.MaybeCreateExprWithCleanups(Result);
Expr *Init = Result.getAs<Expr>();
S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
}
// The destructor's exception specification is needed when IRGen generates
// block copy/destroy functions. Resolve it here.
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
if (CXXDestructorDecl *DD = RD->getDestructor()) {
auto *FPT = DD->getType()->getAs<FunctionProtoType>();
S.ResolveExceptionSpec(Loc, FPT);
}
}
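// Mark __block variables captured by escaping blocks and, for those of
// structure or class type, compute and record their copy-initialization
// expressions via checkEscapingByref above.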
static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
// Set the EscapingByref flag of __block variables captured by
// escaping blocks.
for (const BlockDecl *BD : FSI.Blocks) {
for (const BlockDecl::Capture &BC : BD->captures()) {
VarDecl *VD = BC.getVariable();
if (VD->hasAttr<BlocksAttr>()) {
// Nothing to do if this is a __block variable captured by a
// non-escaping block.
if (BD->doesNotEscape())
continue;
VD->setEscapingByref();
}
// Check whether the captured variable is or contains an object of
// non-trivial C union type.
QualType CapType = BC.getVariable()->getType();
if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
CapType.hasNonTrivialToPrimitiveCopyCUnion())
S.checkNonTrivialCUnion(BC.getVariable()->getType(),
BD->getCaretLocation(),
Sema::NTCUC_BlockCapture,
Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
}
}
for (VarDecl *VD : FSI.ByrefBlockVars) {
// __block variables might require us to capture a copy-initializer.
if (!VD->isEscapingByref())
continue;
// It's currently invalid to ever have a __block variable with an
// array type; should we diagnose that here?
// Regardless, we don't want to ignore array nesting when
// constructing this copy.
if (VD->getType()->isStructureOrClassType())
checkEscapingByref(VD, S);
}
}
/// Pop a function (or block or lambda or captured region) scope from the stack.
///
/// \param WP The warning policy to use for CFG-based warnings, or null if such
/// warnings should not be produced.
/// \param D The declaration corresponding to this function scope, if producing
/// CFG-based warnings.
/// \param BlockType The type of the block expression, if D is a BlockDecl.
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
const Decl *D, QualType BlockType) {
assert(!FunctionScopes.empty() && "mismatched push/pop!");
markEscapingByrefs(*FunctionScopes.back(), *this);
PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
PoppedFunctionScopeDeleter(this));
if (LangOpts.OpenMP)
popOpenMPFunctionRegion(Scope.get());
// Issue any analysis-based warnings.
if (WP && D)
AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
else
for (const auto &PUD : Scope->PossiblyUnreachableDiags)
Diag(PUD.Loc, PUD.PD);
return Scope;
}
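// Deleter used by PoppedFunctionScopePtr: a plain function's scope is cached
// on the Sema object for reuse by the next function; any other scope is freed.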
void Sema::PoppedFunctionScopeDeleter::
operator()(sema::FunctionScopeInfo *Scope) const {
// Stash the function scope for later reuse if it's for a normal function.
if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
Self->CachedFunctionScope.reset(Scope);
else
delete Scope;
}
void Sema::PushCompoundScope(bool IsStmtExpr) {
getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr));
}
void Sema::PopCompoundScope() {
FunctionScopeInfo *CurFunction = getCurFunction();
assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
CurFunction->CompoundScopes.pop_back();
}
/// Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
return getCurFunction()->hasUnrecoverableErrorOccurred();
}
void Sema::setFunctionHasBranchIntoScope() {
if (!FunctionScopes.empty())
FunctionScopes.back()->setHasBranchIntoScope();
}
void Sema::setFunctionHasBranchProtectedScope() {
if (!FunctionScopes.empty())
FunctionScopes.back()->setHasBranchProtectedScope();
}
void Sema::setFunctionHasIndirectGoto() {
if (!FunctionScopes.empty())
FunctionScopes.back()->setHasIndirectGoto();
}
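// Retrieve the current block scope, or null if the innermost function scope
// is not a block or if template instantiation has switched us out of the
// block's context.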
BlockScopeInfo *Sema::getCurBlock() {
if (FunctionScopes.empty())
return nullptr;
auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
if (CurBSI && CurBSI->TheDecl &&
!CurBSI->TheDecl->Encloses(CurContext)) {
// We have switched contexts due to template instantiation.
assert(!CodeSynthesisContexts.empty());
return nullptr;
}
return CurBSI;
}
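// Find the innermost enclosing non-block function scope, skipping over any
// block scopes on the way out.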
FunctionScopeInfo *Sema::getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
LambdaScopeInfo *Sema::getEnclosingLambda() const {
for (auto *Scope : llvm::reverse(FunctionScopes)) {
if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) {
// We have switched contexts due to template instantiation.
// FIXME: We should swap out the FunctionScopes during code synthesis
// so that we don't need to check for this.
assert(!CodeSynthesisContexts.empty());
return nullptr;
}
return LSI;
}
}
return nullptr;
}
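// Retrieve the current lambda scope. With IgnoreNonLambdaCapturingScope set,
// capturing scopes that are not lambdas (blocks, captured regions) are skipped
// first; null is returned if the nearest candidate is not a lambda or if
// template instantiation has switched contexts.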
LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
if (FunctionScopes.empty())
return nullptr;
auto I = FunctionScopes.rbegin();
if (IgnoreNonLambdaCapturingScope) {
auto E = FunctionScopes.rend();
while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
++I;
if (I == E)
return nullptr;
}
auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
if (CurLSI && CurLSI->Lambda &&
!CurLSI->Lambda->Encloses(CurContext)) {
// We have switched contexts due to template instantiation.
assert(!CodeSynthesisContexts.empty());
return nullptr;
}
return CurLSI;
}
// We have a generic lambda if we parsed auto parameters, or we have
// an associated template parameter list.
LambdaScopeInfo *Sema::getCurGenericLambda() {
if (LambdaScopeInfo *LSI = getCurLambda()) {
return (LSI->TemplateParams.size() ||
LSI->GLTemplateParameterList) ? LSI : nullptr;
}
return nullptr;
}
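// Record a comment seen during parsing. Comments in system headers are
// normally dropped, and a comment that is almost a Doxygen trailing member
// comment (e.g. "//<" rather than "///<") gets a warning with a fix-it.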
void Sema::ActOnComment(SourceRange Comment) {
if (!LangOpts.RetainCommentsFromSystemHeaders &&
SourceMgr.isInSystemHeader(Comment.getBegin()))
return;
RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
if (RC.isAlmostTrailingComment()) {
SourceRange MagicMarkerRange(Comment.getBegin(),
Comment.getBegin().getLocWithOffset(3));
StringRef MagicMarkerText;
switch (RC.getKind()) {
case RawComment::RCK_OrdinaryBCPL:
MagicMarkerText = "///<";
break;
case RawComment::RCK_OrdinaryC:
MagicMarkerText = "/**<";
break;
default:
llvm_unreachable("if this is an almost Doxygen comment, "
"it should be ordinary");
}
Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
}
Context.addComment(RC);
}
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
char ExternalSemaSource::ID;
void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
void ExternalSemaSource::ReadKnownNamespaces(
SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}
void ExternalSemaSource::ReadUndefinedButUsed(
llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
/// Figure out if an expression could be turned into a call.
///
/// Use this when trying to recover from an error where the programmer may have
/// written just the name of a function instead of actually calling it.
///
/// \param E - The expression to examine.
/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
/// with no arguments, this parameter is set to the type returned by such a
/// call; otherwise, it is set to an empty QualType.
/// \param OverloadSet - If the expression is an overloaded function
/// name, this parameter is populated with the decls of the various overloads.
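/// For example, if 'int f();' is in scope and the programmer wrote just 'f'
/// where a call was intended, this returns true and sets ZeroArgCallReturnTy
/// to 'int'.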
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &OverloadSet) {
ZeroArgCallReturnTy = QualType();
OverloadSet.clear();
const OverloadExpr *Overloads = nullptr;
bool IsMemExpr = false;
if (E.getType() == Context.OverloadTy) {
OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));
// Ignore overloads that are pointer-to-member constants.
if (FR.HasFormOfMemberPointer)
return false;
Overloads = FR.Expression;
} else if (E.getType() == Context.BoundMemberTy) {
Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
IsMemExpr = true;
}
bool Ambiguous = false;
bool IsMV = false;
if (Overloads) {
for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
OverloadSet.addDecl(*it);
// Check whether the function is a non-template, non-member which takes no
// arguments.
if (IsMemExpr)
continue;
if (const FunctionDecl *OverloadDecl
= dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
if (OverloadDecl->getMinRequiredArguments() == 0) {
if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
(!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
OverloadDecl->isCPUSpecificMultiVersion()))) {
ZeroArgCallReturnTy = QualType();
Ambiguous = true;
} else {
ZeroArgCallReturnTy = OverloadDecl->getReturnType();
IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
OverloadDecl->isCPUSpecificMultiVersion();
}
}
}
}
// If it's not a member, use better machinery to try to resolve the call
if (!IsMemExpr)
return !ZeroArgCallReturnTy.isNull();
}
// Attempt to call the member with no arguments - this will correctly handle
// member templates with defaults/deduction of template arguments, overloads
// with default arguments, etc.
if (IsMemExpr && !E.isTypeDependent()) {
Sema::TentativeAnalysisScope Trap(*this);
ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
None, SourceLocation());
if (R.isUsable()) {
ZeroArgCallReturnTy = R.get()->getType();
return true;
}
return false;
}
if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
if (Fun->getMinRequiredArguments() == 0)
ZeroArgCallReturnTy = Fun->getReturnType();
return true;
}
}
// We don't have an expression that's convenient to get a FunctionDecl from,
// but we can at least check if the type is "function of 0 arguments".
QualType ExprTy = E.getType();
const FunctionType *FunTy = nullptr;
QualType PointeeTy = ExprTy->getPointeeType();
if (!PointeeTy.isNull())
FunTy = PointeeTy->getAs<FunctionType>();
if (!FunTy)
FunTy = ExprTy->getAs<FunctionType>();
if (const FunctionProtoType *FPT =
dyn_cast_or_null<FunctionProtoType>(FunTy)) {
if (FPT->getNumParams() == 0)
ZeroArgCallReturnTy = FunTy->getReturnType();
return true;
}
return false;
}
/// Give notes for a set of overloads.
///
/// A companion to tryExprAsCall. In cases when the name that the programmer
/// wrote was an overloaded function, we may be able to make some guesses about
/// plausible overloads based on their return types; such guesses can be handed
/// off to this method to be emitted as notes.
///
/// \param Overloads - The overloads to note.
/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
/// -fshow-overloads=best, this is the location to attach to the note about too
/// many candidates. Typically this will be the location of the original
/// ill-formed expression.
static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
const SourceLocation FinalNoteLoc) {
int ShownOverloads = 0;
int SuppressedOverloads = 0;
for (UnresolvedSetImpl::iterator It = Overloads.begin(),
DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
// FIXME: Magic number for max shown overloads stolen from
// OverloadCandidateSet::NoteCandidates.
if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) {
++SuppressedOverloads;
continue;
}
NamedDecl *Fn = (*It)->getUnderlyingDecl();
// Don't print overloads for non-default multiversioned functions.
if (const auto *FD = Fn->getAsFunction()) {
if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
!FD->getAttr<TargetAttr>()->isDefaultVersion())
continue;
}
S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
++ShownOverloads;
}
if (SuppressedOverloads)
S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
<< SuppressedOverloads;
}
static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
const UnresolvedSetImpl &Overloads,
bool (*IsPlausibleResult)(QualType)) {
if (!IsPlausibleResult)
return noteOverloads(S, Overloads, Loc);
UnresolvedSet<2> PlausibleOverloads;
for (OverloadExpr::decls_iterator It = Overloads.begin(),
DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
QualType OverloadResultTy = OverloadDecl->getReturnType();
if (IsPlausibleResult(OverloadResultTy))
PlausibleOverloads.addDecl(It.getDecl());
}
noteOverloads(S, PlausibleOverloads, Loc);
}
/// Determine whether the given expression can be called by just
/// putting parentheses after it. Notably, expressions with unary
/// operators can't be because the unary operator will start parsing
/// outside the call.
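/// For example, '&foo' is rejected because appending parentheses would yield
/// '&foo()', which parses as '&(foo())' rather than a call to '&foo'.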
static bool IsCallableWithAppend(Expr *E) {
E = E->IgnoreImplicit();
return (!isa<CStyleCastExpr>(E) &&
!isa<UnaryOperator>(E) &&
!isa<BinaryOperator>(E) &&
!isa<CXXOperatorCallExpr>(E));
}
static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
if (const auto *UO = dyn_cast<UnaryOperator>(E))
E = UO->getSubExpr();
if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
if (ULE->getNumDecls() == 0)
return false;
const NamedDecl *ND = *ULE->decls_begin();
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
}
return false;
}
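// Try to recover from a missing call: if E can be called with zero arguments
// and the result type is plausible, emit PD with a fix-it that appends "()"
// and rewrite E into that call; otherwise complain only if ForceComplain.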
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain,
bool (*IsPlausibleResult)(QualType)) {
SourceLocation Loc = E.get()->getExprLoc();
SourceRange Range = E.get()->getSourceRange();
QualType ZeroArgCallTy;
UnresolvedSet<4> Overloads;
if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
!ZeroArgCallTy.isNull() &&
(!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
// At this point, we know E is potentially callable with 0
// arguments and that it returns something of a reasonable type,
// so we can emit a fixit and carry on pretending that E was
// actually a CallExpr.
SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
<< (IsCallableWithAppend(E.get())
? FixItHint::CreateInsertion(ParenInsertionLoc, "()")
: FixItHint());
if (!IsMV)
notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
// FIXME: Try this before emitting the fixit, and suppress diagnostics
// while doing so.
E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None,
Range.getEnd().getLocWithOffset(1));
return true;
}
if (!ForceComplain) return false;
bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
if (!IsMV)
notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
E = ExprError();
return true;
}
IdentifierInfo *Sema::getSuperIdentifier() const {
if (!Ident_super)
Ident_super = &Context.Idents.get("super");
return Ident_super;
}
IdentifierInfo *Sema::getFloat128Identifier() const {
if (!Ident___float128)
Ident___float128 = &Context.Idents.get("__float128");
return Ident___float128;
}
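// Enter a captured region (for example an OpenMP construct): push a
// CapturedRegionScopeInfo, which always has a void return type.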
void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
CapturedRegionKind K,
unsigned OpenMPCaptureLevel) {
auto *CSI = new CapturedRegionScopeInfo(
getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
(getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
OpenMPCaptureLevel);
CSI->ReturnType = Context.VoidTy;
FunctionScopes.push_back(CSI);
}
CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
if (FunctionScopes.empty())
return nullptr;
return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
}
const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
return DeleteExprs;
}
void Sema::setOpenCLExtensionForType(QualType T, llvm::StringRef ExtStr) {
if (ExtStr.empty())
return;
llvm::SmallVector<StringRef, 1> Exts;
ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false);
auto CanT = T.getCanonicalType().getTypePtr();
for (auto &I : Exts)
OpenCLTypeExtMap[CanT].insert(I.str());
}
void Sema::setOpenCLExtensionForDecl(Decl *FD, StringRef ExtStr) {
llvm::SmallVector<StringRef, 1> Exts;
ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false);
if (Exts.empty())
return;
for (auto &I : Exts)
OpenCLDeclExtMap[FD].insert(I.str());
}
void Sema::setCurrentOpenCLExtensionForType(QualType T) {
if (CurrOpenCLExtension.empty())
return;
setOpenCLExtensionForType(T, CurrOpenCLExtension);
}
void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) {
if (CurrOpenCLExtension.empty())
return;
setOpenCLExtensionForDecl(D, CurrOpenCLExtension);
}
std::string Sema::getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD) {
if (!OpenCLDeclExtMap.empty())
return getOpenCLExtensionsFromExtMap(FD, OpenCLDeclExtMap);
return "";
}
std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) {
if (!OpenCLTypeExtMap.empty())
return getOpenCLExtensionsFromExtMap(FT, OpenCLTypeExtMap);
return "";
}
template <typename T, typename MapT>
std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) {
auto Loc = Map.find(FDT);
return llvm::join(Loc->second, " ");
}
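// Return true if the declaration requires an OpenCL extension that is not
// currently enabled.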
bool Sema::isOpenCLDisabledDecl(Decl *FD) {
auto Loc = OpenCLDeclExtMap.find(FD);
if (Loc == OpenCLDeclExtMap.end())
return false;
for (auto &I : Loc->second) {
if (!getOpenCLOptions().isEnabled(I))
return true;
}
return false;
}
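// Emit err_opencl_requires_extension for each required OpenCL extension that
// is neither the extension currently being declared nor enabled; returns true
// if any diagnostic was produced.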
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool Sema::checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc,
DiagInfoT DiagInfo, MapT &Map,
unsigned Selector,
SourceRange SrcRange) {
auto Loc = Map.find(D);
if (Loc == Map.end())
return false;
bool Disabled = false;
for (auto &I : Loc->second) {
if (I != CurrOpenCLExtension && !getOpenCLOptions().isEnabled(I)) {
Diag(DiagLoc, diag::err_opencl_requires_extension) << Selector << DiagInfo
<< I << SrcRange;
Disabled = true;
}
}
return Disabled;
}
bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) {
// Check extensions for declared types.
Decl *Decl = nullptr;
if (auto TypedefT = dyn_cast<TypedefType>(QT.getTypePtr()))
Decl = TypedefT->getDecl();
if (auto TagT = dyn_cast<TagType>(QT.getCanonicalType().getTypePtr()))
Decl = TagT->getDecl();
auto Loc = DS.getTypeSpecTypeLoc();
// Check extensions for vector types.
// e.g. double4 is not allowed when cl_khr_fp64 is absent.
if (QT->isExtVectorType()) {
auto TypePtr = QT->castAs<ExtVectorType>()->getElementType().getTypePtr();
return checkOpenCLDisabledTypeOrDecl(TypePtr, Loc, QT, OpenCLTypeExtMap);
}
if (checkOpenCLDisabledTypeOrDecl(Decl, Loc, QT, OpenCLDeclExtMap))
return true;
// Check extensions for builtin types.
return checkOpenCLDisabledTypeOrDecl(QT.getCanonicalType().getTypePtr(), Loc,
QT, OpenCLTypeExtMap);
}
bool Sema::checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E) {
IdentifierInfo *FnName = D.getIdentifier();
return checkOpenCLDisabledTypeOrDecl(&D, E.getBeginLoc(), FnName,
OpenCLDeclExtMap, 1, D.getSourceRange());
}