//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/CodeGenAction.h"
|
|
|
|
#include "CodeGenModule.h"
|
2014-08-05 02:41:51 +08:00
|
|
|
#include "CoverageMappingGen.h"
|
2017-02-10 06:07:24 +08:00
|
|
|
#include "MacroPPCallbacks.h"
|
2008-10-22 07:49:24 +08:00
|
|
|
#include "clang/AST/ASTConsumer.h"
|
2009-11-15 14:48:46 +08:00
|
|
|
#include "clang/AST/ASTContext.h"
|
2014-05-24 04:37:38 +08:00
|
|
|
#include "clang/AST/DeclCXX.h"
|
2015-01-14 19:29:14 +08:00
|
|
|
#include "clang/AST/DeclGroup.h"
|
2019-09-12 00:19:50 +08:00
|
|
|
#include "clang/Basic/DiagnosticFrontend.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Basic/FileManager.h"
|
2019-08-05 21:59:26 +08:00
|
|
|
#include "clang/Basic/LangStandard.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "clang/Basic/SourceManager.h"
|
|
|
|
#include "clang/Basic/TargetInfo.h"
|
2010-06-16 01:48:49 +08:00
|
|
|
#include "clang/CodeGen/BackendUtil.h"
|
2009-11-15 14:48:46 +08:00
|
|
|
#include "clang/CodeGen/ModuleBuilder.h"
|
2019-03-13 05:22:27 +08:00
|
|
|
#include "clang/Driver/DriverDiagnostic.h"
|
2010-02-25 12:37:45 +08:00
|
|
|
#include "clang/Frontend/CompilerInstance.h"
|
2009-12-03 17:12:54 +08:00
|
|
|
#include "clang/Frontend/FrontendDiagnostic.h"
|
2015-01-14 19:29:14 +08:00
|
|
|
#include "clang/Lex/Preprocessor.h"
|
2021-09-15 04:09:23 +08:00
|
|
|
#include "llvm/ADT/Hashing.h"
|
2016-11-11 13:35:12 +08:00
|
|
|
#include "llvm/Bitcode/BitcodeReader.h"
|
2017-01-26 12:07:11 +08:00
|
|
|
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
|
2021-09-24 04:54:24 +08:00
|
|
|
#include "llvm/Demangle/Demangle.h"
|
2014-04-17 00:54:24 +08:00
|
|
|
#include "llvm/IR/DebugInfo.h"
|
2014-02-07 02:30:43 +08:00
|
|
|
#include "llvm/IR/DiagnosticInfo.h"
|
|
|
|
#include "llvm/IR/DiagnosticPrinter.h"
|
2017-03-14 02:08:11 +08:00
|
|
|
#include "llvm/IR/GlobalValue.h"
|
2013-01-02 19:45:17 +08:00
|
|
|
#include "llvm/IR/LLVMContext.h"
|
2019-10-29 05:53:31 +08:00
|
|
|
#include "llvm/IR/LLVMRemarkStreamer.h"
|
2013-01-02 19:45:17 +08:00
|
|
|
#include "llvm/IR/Module.h"
|
2013-03-26 10:25:54 +08:00
|
|
|
#include "llvm/IRReader/IRReader.h"
|
2020-09-17 03:08:15 +08:00
|
|
|
#include "llvm/LTO/LTOBackend.h"
|
2014-03-06 11:46:44 +08:00
|
|
|
#include "llvm/Linker/Linker.h"
|
2010-06-08 07:21:04 +08:00
|
|
|
#include "llvm/Pass.h"
|
2010-04-07 02:38:50 +08:00
|
|
|
#include "llvm/Support/MemoryBuffer.h"
|
|
|
|
#include "llvm/Support/SourceMgr.h"
|
2019-08-20 06:58:26 +08:00
|
|
|
#include "llvm/Support/TimeProfiler.h"
|
2009-02-18 09:37:30 +08:00
|
|
|
#include "llvm/Support/Timer.h"
|
2016-10-11 08:26:09 +08:00
|
|
|
#include "llvm/Support/ToolOutputFile.h"
|
|
|
|
#include "llvm/Support/YAMLTraits.h"
|
2017-03-14 02:08:11 +08:00
|
|
|
#include "llvm/Transforms/IPO/Internalize.h"
|
|
|
|
|
2014-03-09 19:36:40 +08:00
|
|
|
#include <memory>
|
2008-10-22 07:49:24 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace llvm;
|
|
|
|
|
2021-10-20 06:17:07 +08:00
|
|
|
#define DEBUG_TYPE "codegenaction"
|
|
|
|
|
2011-01-26 04:34:14 +08:00
|
|
|

namespace clang {
class BackendConsumer;
class ClangDiagnosticHandler final : public DiagnosticHandler {
public:
  ClangDiagnosticHandler(const CodeGenOptions &CGOpts, BackendConsumer *BCon)
      : CodeGenOpts(CGOpts), BackendCon(BCon) {}

  bool handleDiagnostics(const DiagnosticInfo &DI) override;

  bool isAnalysisRemarkEnabled(StringRef PassName) const override {
    return CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(PassName);
  }
  bool isMissedOptRemarkEnabled(StringRef PassName) const override {
    return CodeGenOpts.OptimizationRemarkMissed.patternMatches(PassName);
  }
  bool isPassedOptRemarkEnabled(StringRef PassName) const override {
    return CodeGenOpts.OptimizationRemark.patternMatches(PassName);
  }

  bool isAnyRemarkEnabled() const override {
    return CodeGenOpts.OptimizationRemarkAnalysis.hasValidPattern() ||
           CodeGenOpts.OptimizationRemarkMissed.hasValidPattern() ||
           CodeGenOpts.OptimizationRemark.hasValidPattern();
  }

private:
  const CodeGenOptions &CodeGenOpts;
  BackendConsumer *BackendCon;
};
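
// The four predicates above are how LLVM's remark machinery asks the frontend
// whether a remark from a given pass is wanted at all; the patterns they test
// come straight from CodeGenOptions. As a usage sketch (flag spellings taken
// from common clang documentation, not from this file):
//   clang -O2 -Rpass=inline -Rpass-missed=inline -Rpass-analysis=inline x.c
// should make all three pattern checks accept the "inline" pass name.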

static void reportOptRecordError(Error E, DiagnosticsEngine &Diags,
                                 const CodeGenOptions CodeGenOpts) {
  handleAllErrors(
      std::move(E),
      [&](const LLVMRemarkSetupFileError &E) {
        Diags.Report(diag::err_cannot_open_file)
            << CodeGenOpts.OptRecordFile << E.message();
      },
      [&](const LLVMRemarkSetupPatternError &E) {
        Diags.Report(diag::err_drv_optimization_remark_pattern)
            << E.message() << CodeGenOpts.OptRecordPasses;
      },
      [&](const LLVMRemarkSetupFormatError &E) {
        Diags.Report(diag::err_drv_optimization_remark_format)
            << CodeGenOpts.OptRecordFormat;
      });
}
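
// reportOptRecordError() maps remark-output setup failures back onto clang
// diagnostics. As a sketch of when this fires (flag spellings assumed from
// common clang documentation): requesting a serialized remark file, e.g. with
// -fsave-optimization-record or -foptimization-record-file=<file>, can fail
// if the file cannot be opened, if -foptimization-record-passes=<regex> is
// malformed, or if an unknown record format is requested.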

class BackendConsumer : public ASTConsumer {
  using LinkModule = CodeGenAction::LinkModule;

  virtual void anchor();
  DiagnosticsEngine &Diags;
  BackendAction Action;
  const HeaderSearchOptions &HeaderSearchOpts;
  const CodeGenOptions &CodeGenOpts;
  const TargetOptions &TargetOpts;
  const LangOptions &LangOpts;
  std::unique_ptr<raw_pwrite_stream> AsmOutStream;
  ASTContext *Context;

  Timer LLVMIRGeneration;
  unsigned LLVMIRGenerationRefCount;

  /// True if we've finished generating IR. This prevents us from generating
  /// additional LLVM IR after emitting output in HandleTranslationUnit. This
  /// can happen when Clang plugins trigger additional AST deserialization.
  bool IRGenFinished = false;

  bool TimerIsEnabled = false;

  std::unique_ptr<CodeGenerator> Gen;

  SmallVector<LinkModule, 4> LinkModules;

  // A map from mangled names to their function's source location, used for
  // backend diagnostics, as the Clang AST may be unavailable by then. We use
  // the hash of the mangled name as the key because mangled names can be very
  // long and take up a lot of space. Using a hash can cause name collisions,
  // but those are rare and the consequence is merely pointing at the wrong
  // source location, which is not severe. This is a vector rather than an
  // actual map because we optimize for the time spent building the map rather
  // than the time spent retrieving an entry; backend diagnostics are uncommon.
  std::vector<std::pair<llvm::hash_code, FullSourceLoc>>
      ManglingFullSourceLocs;

  // This is here so that the diagnostic printer knows the module a diagnostic
  // refers to.
  llvm::Module *CurLinkModule = nullptr;

public:
  BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
                  const HeaderSearchOptions &HeaderSearchOpts,
                  const PreprocessorOptions &PPOpts,
                  const CodeGenOptions &CodeGenOpts,
                  const TargetOptions &TargetOpts,
                  const LangOptions &LangOpts, const std::string &InFile,
                  SmallVector<LinkModule, 4> LinkModules,
                  std::unique_ptr<raw_pwrite_stream> OS, LLVMContext &C,
                  CoverageSourceInfo *CoverageInfo = nullptr)
      : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
        CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
        AsmOutStream(std::move(OS)), Context(nullptr),
        LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
        LLVMIRGenerationRefCount(0),
        Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
                              CodeGenOpts, C, CoverageInfo)),
        LinkModules(std::move(LinkModules)) {
    TimerIsEnabled = CodeGenOpts.TimePasses;
    llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
    llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
  }

  // This constructor is used in installing an empty BackendConsumer
  // to use the clang diagnostic handler for IR input files. It avoids
  // initializing the OS field.
  BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
                  const HeaderSearchOptions &HeaderSearchOpts,
                  const PreprocessorOptions &PPOpts,
                  const CodeGenOptions &CodeGenOpts,
                  const TargetOptions &TargetOpts,
                  const LangOptions &LangOpts, llvm::Module *Module,
                  SmallVector<LinkModule, 4> LinkModules, LLVMContext &C,
                  CoverageSourceInfo *CoverageInfo = nullptr)
      : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
        CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
        Context(nullptr),
        LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
        LLVMIRGenerationRefCount(0),
        Gen(CreateLLVMCodeGen(Diags, "", HeaderSearchOpts, PPOpts,
                              CodeGenOpts, C, CoverageInfo)),
        LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
    TimerIsEnabled = CodeGenOpts.TimePasses;
    llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
    llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
  }
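
  // Both constructors funnel into CreateLLVMCodeGen(). The second one, which
  // takes an existing llvm::Module instead of an output stream, backs the
  // "empty" consumer described above and is presumably what gets installed
  // when the input is already LLVM IR (e.g. `clang -x ir foo.ll`), where only
  // the diagnostic plumbing is needed.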

  llvm::Module *getModule() const { return Gen->GetModule(); }
  std::unique_ptr<llvm::Module> takeModule() {
    return std::unique_ptr<llvm::Module>(Gen->ReleaseModule());
  }

  CodeGenerator *getCodeGenerator() { return Gen.get(); }

  void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
    Gen->HandleCXXStaticMemberVarInstantiation(VD);
  }

  void Initialize(ASTContext &Ctx) override {
    assert(!Context && "initialized multiple times");

    Context = &Ctx;

    if (TimerIsEnabled)
      LLVMIRGeneration.startTimer();

    Gen->Initialize(Ctx);

    if (TimerIsEnabled)
      LLVMIRGeneration.stopTimer();
  }

  bool HandleTopLevelDecl(DeclGroupRef D) override {
    PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
                                   Context->getSourceManager(),
                                   "LLVM IR generation of declaration");

    // Recurse.
    if (TimerIsEnabled) {
      LLVMIRGenerationRefCount += 1;
      if (LLVMIRGenerationRefCount == 1)
        LLVMIRGeneration.startTimer();
    }

    Gen->HandleTopLevelDecl(D);

    if (TimerIsEnabled) {
      LLVMIRGenerationRefCount -= 1;
      if (LLVMIRGenerationRefCount == 0)
        LLVMIRGeneration.stopTimer();
    }

    return true;
  }

  void HandleInlineFunctionDefinition(FunctionDecl *D) override {
    PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
                                   Context->getSourceManager(),
                                   "LLVM IR generation of inline function");
    if (TimerIsEnabled)
      LLVMIRGeneration.startTimer();

    Gen->HandleInlineFunctionDefinition(D);

    if (TimerIsEnabled)
      LLVMIRGeneration.stopTimer();
  }

  void HandleInterestingDecl(DeclGroupRef D) override {
    // Ignore interesting decls from the AST reader after IRGen is finished.
    if (!IRGenFinished)
      HandleTopLevelDecl(D);
  }

  // Links each entry in LinkModules into our module. Returns true on error.
  bool LinkInModules() {
    for (auto &LM : LinkModules) {
      if (LM.PropagateAttrs)
        for (Function &F : *LM.Module) {
          // Skip intrinsics. Keep consistent with how intrinsics are created
          // in LLVM IR.
          if (F.isIntrinsic())
            continue;
          Gen->CGM().addDefaultFunctionDefinitionAttributes(F);
        }

      CurLinkModule = LM.Module.get();

      bool Err;
      if (LM.Internalize) {
        Err = Linker::linkModules(
            *getModule(), std::move(LM.Module), LM.LinkFlags,
            [](llvm::Module &M, const llvm::StringSet<> &GVS) {
              internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
                return !GV.hasName() || (GVS.count(GV.getName()) == 0);
              });
            });
      } else {
        Err = Linker::linkModules(*getModule(), std::move(LM.Module),
                                  LM.LinkFlags);
      }

      if (Err)
        return true;
    }
    return false; // success
  }
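
  // LinkInModules() is how builtin bitcode libraries (e.g. CUDA's libdevice)
  // reach the output module. When PropagateAttrs is set, the default function
  // attributes clang would have emitted itself (notably the fast-math
  // attributes) are stamped onto the library's functions first; otherwise the
  // inliner would treat a missing "unsafe-fp-math"="true" on a libdevice
  // callee as "false" and, by merging attributes upward, effectively disable
  // fast math in every caller that transitively reaches the library. Such
  // libraries are typically passed via -mlink-builtin-bitcode (assumed flag
  // spelling).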

  void HandleTranslationUnit(ASTContext &C) override {
    {
      llvm::TimeTraceScope TimeScope("Frontend");
      PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
      if (TimerIsEnabled) {
        LLVMIRGenerationRefCount += 1;
        if (LLVMIRGenerationRefCount == 1)
          LLVMIRGeneration.startTimer();
      }

      Gen->HandleTranslationUnit(C);

      if (TimerIsEnabled) {
        LLVMIRGenerationRefCount -= 1;
        if (LLVMIRGenerationRefCount == 0)
          LLVMIRGeneration.stopTimer();
      }

      IRGenFinished = true;
    }

    // Silently ignore if we weren't initialized for some reason.
    if (!getModule())
      return;

    LLVMContext &Ctx = getModule()->getContext();
    std::unique_ptr<DiagnosticHandler> OldDiagnosticHandler =
        Ctx.getDiagnosticHandler();
    Ctx.setDiagnosticHandler(std::make_unique<ClangDiagnosticHandler>(
        CodeGenOpts, this));

    Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
        setupLLVMOptimizationRemarks(
            Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
            CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
            CodeGenOpts.DiagnosticsHotnessThreshold);

    if (Error E = OptRecordFileOrErr.takeError()) {
      reportOptRecordError(std::move(E), Diags, CodeGenOpts);
      return;
    }

    std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
        std::move(*OptRecordFileOrErr);

    if (OptRecordFile &&
        CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
      Ctx.setDiagnosticsHotnessRequested(true);

    // Link each LinkModule into our module.
    if (LinkInModules())
      return;

    for (auto &F : getModule()->functions()) {
      if (const Decl *FD = Gen->GetDeclForMangledName(F.getName())) {
        auto Loc = FD->getASTContext().getFullLoc(FD->getLocation());
        // TODO: use a fast content hash when available.
        auto NameHash = llvm::hash_value(F.getName());
        ManglingFullSourceLocs.push_back(std::make_pair(NameHash, Loc));
      }
    }

    if (CodeGenOpts.ClearASTBeforeBackend) {
      LLVM_DEBUG(llvm::dbgs() << "Clearing AST...\n");
      // Access to the AST is no longer available after this.
      // Other things that the ASTContext manages are still available, e.g.
      // the SourceManager. It would be nice if we could separate out the
      // pieces of ASTContext that are still needed after this point and null
      // out the ASTContext itself, but too many other parts of the compiler
      // still rely on it.
      C.cleanup();
      C.getAllocator().Reset();
    }

    EmbedBitcode(getModule(), CodeGenOpts, llvm::MemoryBufferRef());

    EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
                      LangOpts, C.getTargetInfo().getDataLayoutString(),
                      getModule(), Action, std::move(AsmOutStream));

    Ctx.setDiagnosticHandler(std::move(OldDiagnosticHandler));

    if (OptRecordFile)
      OptRecordFile->keep();
  }
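
  // HandleTranslationUnit() above drives everything that happens after IR
  // generation: it installs ClangDiagnosticHandler, sets up the optional
  // remark output file, links in the LinkModules, records mangled-name source
  // locations for later backend diagnostics, optionally frees the AST, and
  // finally hands the module to EmitBackendOutput().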

  void HandleTagDeclDefinition(TagDecl *D) override {
    PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
                                   Context->getSourceManager(),
                                   "LLVM IR generation of declaration");
    Gen->HandleTagDeclDefinition(D);
  }

  void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
    Gen->HandleTagDeclRequiredDefinition(D);
  }

  void CompleteTentativeDefinition(VarDecl *D) override {
    Gen->CompleteTentativeDefinition(D);
  }

  void CompleteExternalDeclaration(VarDecl *D) override {
    Gen->CompleteExternalDeclaration(D);
  }

  void AssignInheritanceModel(CXXRecordDecl *RD) override {
    Gen->AssignInheritanceModel(RD);
  }

  void HandleVTable(CXXRecordDecl *RD) override {
    Gen->HandleVTable(RD);
  }

  /// Get the best possible source location to represent a diagnostic that
  /// may have associated debug info.
  const FullSourceLoc
  getBestLocationFromDebugLoc(const llvm::DiagnosticInfoWithLocationBase &D,
                              bool &BadDebugInfo, StringRef &Filename,
                              unsigned &Line, unsigned &Column) const;

  Optional<FullSourceLoc> getFunctionSourceLocation(const Function &F) const;

  void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
  /// Specialized handler for InlineAsm diagnostic.
  /// \return True if the diagnostic has been successfully reported, false
  /// otherwise.
  bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
  /// Specialized handler for diagnostics reported using SMDiagnostic.
  void SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &D);
  /// Specialized handler for StackSize diagnostic.
  /// \return True if the diagnostic has been successfully reported, false
  /// otherwise.
  bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D);
  /// Specialized handler for unsupported backend feature diagnostic.
  void UnsupportedDiagHandler(const llvm::DiagnosticInfoUnsupported &D);
  /// Specialized handlers for optimization remarks.
  /// Note that these handlers only accept remarks and they always handle
  /// them.
  void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D,
                               unsigned DiagID);
  void
  OptimizationRemarkHandler(const llvm::DiagnosticInfoOptimizationBase &D);
  void OptimizationRemarkHandler(
      const llvm::OptimizationRemarkAnalysisFPCommute &D);
  void OptimizationRemarkHandler(
      const llvm::OptimizationRemarkAnalysisAliasing &D);
  void OptimizationFailureHandler(
      const llvm::DiagnosticInfoOptimizationFailure &D);
  void DontCallDiagHandler(const DiagnosticInfoDontCall &D);
};

void BackendConsumer::anchor() {}
}

bool ClangDiagnosticHandler::handleDiagnostics(const DiagnosticInfo &DI) {
  BackendCon->DiagnosticHandlerImpl(DI);
  return true;
}

/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
/// buffer to be a valid FullSourceLoc.
static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
                                            SourceManager &CSM) {
  // Get both the clang and llvm source managers. The location is relative to
  // a memory buffer that the LLVM SourceMgr is handling; we need to add a
  // copy to the Clang source manager.
  const llvm::SourceMgr &LSM = *D.getSourceMgr();

  // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
  // already owns its buffer and clang::SourceManager wants to own one as well.
  const MemoryBuffer *LBuf =
      LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));

  // Create the copy and transfer ownership to clang::SourceManager.
  // TODO: Avoid copying files into memory.
  std::unique_ptr<llvm::MemoryBuffer> CBuf =
      llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
                                           LBuf->getBufferIdentifier());
  // FIXME: Keep a file ID map instead of creating new IDs for each location.
  FileID FID = CSM.createFileID(std::move(CBuf));

  // Translate the offset into the file.
  unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
  SourceLocation NewLoc =
      CSM.getLocForStartOfFile(FID).getLocWithOffset(Offset);
  return FullSourceLoc(NewLoc, CSM);
}

#define ComputeDiagID(Severity, GroupName, DiagID) \
  do { \
    switch (Severity) { \
    case llvm::DS_Error: \
      DiagID = diag::err_fe_##GroupName; \
      break; \
    case llvm::DS_Warning: \
      DiagID = diag::warn_fe_##GroupName; \
      break; \
    case llvm::DS_Remark: \
      llvm_unreachable("'remark' severity not expected"); \
      break; \
    case llvm::DS_Note: \
      DiagID = diag::note_fe_##GroupName; \
      break; \
    } \
  } while (false)

#define ComputeDiagRemarkID(Severity, GroupName, DiagID) \
  do { \
    switch (Severity) { \
    case llvm::DS_Error: \
      DiagID = diag::err_fe_##GroupName; \
      break; \
    case llvm::DS_Warning: \
      DiagID = diag::warn_fe_##GroupName; \
      break; \
    case llvm::DS_Remark: \
      DiagID = diag::remark_fe_##GroupName; \
      break; \
    case llvm::DS_Note: \
      DiagID = diag::note_fe_##GroupName; \
      break; \
    } \
  } while (false)
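
// For illustration: ComputeDiagID(llvm::DS_Error, inline_asm, DiagID) expands
// to a switch that assigns diag::err_fe_inline_asm, while DS_Warning and
// DS_Note select the warn_fe_ / note_fe_ variants and DS_Remark hits
// llvm_unreachable. ComputeDiagRemarkID is identical except that DS_Remark
// selects diag::remark_fe_##GroupName instead of asserting.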

void BackendConsumer::SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &DI) {
  const llvm::SMDiagnostic &D = DI.getSMDiag();

  unsigned DiagID;
  if (DI.isInlineAsmDiag())
    ComputeDiagID(DI.getSeverity(), inline_asm, DiagID);
  else
    ComputeDiagID(DI.getSeverity(), source_mgr, DiagID);

  // This is for the empty BackendConsumer that uses the clang diagnostic
  // handler for IR input files.
  if (!Context) {
    D.print(nullptr, llvm::errs());
    Diags.Report(DiagID).AddString("cannot compile inline asm");
    return;
  }

  // There are a couple of different kinds of errors we could get here.
  // First, we re-format the SMDiagnostic in terms of a clang diagnostic.

  // Strip "error: " off the start of the message string.
  StringRef Message = D.getMessage();
  (void)Message.consume_front("error: ");

  // If the SMDiagnostic has an inline asm source location, translate it.
  FullSourceLoc Loc;
  if (D.getLoc() != SMLoc())
    Loc = ConvertBackendLocation(D, Context->getSourceManager());

  // If this problem has clang-level source location information, report the
  // issue in the source with a note showing the instantiated code.
  if (DI.isInlineAsmDiag()) {
    SourceLocation LocCookie =
        SourceLocation::getFromRawEncoding(DI.getLocCookie());
    if (LocCookie.isValid()) {
      Diags.Report(LocCookie, DiagID).AddString(Message);

      if (D.getLoc().isValid()) {
        DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
        // Convert the SMDiagnostic ranges into SourceRange and attach them
        // to the diagnostic.
        for (const std::pair<unsigned, unsigned> &Range : D.getRanges()) {
          unsigned Column = D.getColumnNo();
          B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
                           Loc.getLocWithOffset(Range.second - Column));
        }
      }
      return;
    }
  }

  // Otherwise, report the backend issue as occurring in the generated .s file.
  // If Loc is invalid, we still need to report the issue; it just gets no
  // location info.
  Diags.Report(Loc, DiagID).AddString(Message);
}

bool
BackendConsumer::InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D) {
  unsigned DiagID;
  ComputeDiagID(D.getSeverity(), inline_asm, DiagID);
  std::string Message = D.getMsgStr().str();

  // If this problem has clang-level source location information, report the
  // issue as being a problem in the source with a note showing the
  // instantiated code.
  SourceLocation LocCookie =
      SourceLocation::getFromRawEncoding(D.getLocCookie());
  if (LocCookie.isValid())
    Diags.Report(LocCookie, DiagID).AddString(Message);
  else {
    // Otherwise, report the backend diagnostic as occurring in the generated
    // .s file.
    // If Loc is invalid, we still need to report the diagnostic; it just gets
    // no location info.
    FullSourceLoc Loc;
    Diags.Report(Loc, DiagID).AddString(Message);
  }
  // We handled all the possible severities.
  return true;
}

bool
BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) {
  if (D.getSeverity() != llvm::DS_Warning)
    // For now, the only support we have for StackSize diagnostic is warning.
    // We do not know how to format other severities.
    return false;

  auto Loc = getFunctionSourceLocation(D.getFunction());
  if (!Loc)
    return false;

  // FIXME: Shouldn't need to truncate to uint32_t
  Diags.Report(*Loc, diag::warn_fe_frame_larger_than)
      << static_cast<uint32_t>(D.getStackSize())
      << static_cast<uint32_t>(D.getStackLimit())
      << llvm::demangle(D.getFunction().getName().str());
  return true;
}
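
// StackSizeDiagHandler() turns the backend's DiagnosticInfoStackSize into the
// frontend warn_fe_frame_larger_than warning, looking up the function's
// source location by the hash of its mangled name. This is presumably the
// path behind -Wframe-larger-than=<N>-style diagnostics, where the backend
// knows the final frame size but no longer has the AST.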

const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
    const llvm::DiagnosticInfoWithLocationBase &D, bool &BadDebugInfo,
    StringRef &Filename, unsigned &Line, unsigned &Column) const {
  SourceManager &SourceMgr = Context->getSourceManager();
  FileManager &FileMgr = SourceMgr.getFileManager();
  SourceLocation DILoc;

  if (D.isLocationAvailable()) {
    D.getLocation(Filename, Line, Column);
    if (Line > 0) {
      auto FE = FileMgr.getFile(Filename);
      if (!FE)
        FE = FileMgr.getFile(D.getAbsolutePath());
      if (FE) {
        // If -gcolumn-info was not used, Column will be 0. This upsets the
        // source manager, so pass 1 if Column is not set.
        DILoc = SourceMgr.translateFileLineCol(*FE, Line, Column ? Column : 1);
      }
    }
    BadDebugInfo = DILoc.isInvalid();
  }

  // If a location isn't available, try to approximate it using the associated
  // function definition. We use the definition's right brace to differentiate
  // from diagnostics that genuinely relate to the function itself.
  FullSourceLoc Loc(DILoc, SourceMgr);
  if (Loc.isInvalid()) {
    if (auto MaybeLoc = getFunctionSourceLocation(D.getFunction()))
      Loc = *MaybeLoc;
  }

  if (DILoc.isInvalid() && D.isLocationAvailable())
    // If we were not able to translate the file:line:col information
    // back to a SourceLocation, at least emit a note stating that
    // we could not translate this location. This can happen in the
    // case of #line directives.
    Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
        << Filename << Line << Column;

  return Loc;
}

Optional<FullSourceLoc>
BackendConsumer::getFunctionSourceLocation(const Function &F) const {
  auto Hash = llvm::hash_value(F.getName());
  for (const auto &Pair : ManglingFullSourceLocs) {
    if (Pair.first == Hash)
      return Pair.second;
  }
  return Optional<FullSourceLoc>();
}

void BackendConsumer::UnsupportedDiagHandler(
    const llvm::DiagnosticInfoUnsupported &D) {
  // We only support warnings or errors.
  assert(D.getSeverity() == llvm::DS_Error ||
         D.getSeverity() == llvm::DS_Warning);

  StringRef Filename;
  unsigned Line, Column;
  bool BadDebugInfo = false;
  FullSourceLoc Loc;
  std::string Msg;
  raw_string_ostream MsgStream(Msg);

  // Context will be nullptr for IR input files; in that case we construct the
  // diagnostic message from llvm::DiagnosticInfoUnsupported.
  if (Context != nullptr) {
    Loc = getBestLocationFromDebugLoc(D, BadDebugInfo, Filename, Line, Column);
    MsgStream << D.getMessage();
  } else {
    DiagnosticPrinterRawOStream DP(MsgStream);
    D.print(DP);
  }

  auto DiagType = D.getSeverity() == llvm::DS_Error
                      ? diag::err_fe_backend_unsupported
                      : diag::warn_fe_backend_unsupported;
  Diags.Report(Loc, DiagType) << MsgStream.str();

  if (BadDebugInfo)
    // If we were not able to translate the file:line:col information
    // back to a SourceLocation, at least emit a note stating that
    // we could not translate this location. This can happen in the
    // case of #line directives.
    Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
        << Filename << Line << Column;
}

void BackendConsumer::EmitOptimizationMessage(
    const llvm::DiagnosticInfoOptimizationBase &D, unsigned DiagID) {
  // We only support warnings and remarks.
  assert(D.getSeverity() == llvm::DS_Remark ||
         D.getSeverity() == llvm::DS_Warning);

  StringRef Filename;
  unsigned Line, Column;
  bool BadDebugInfo = false;
  FullSourceLoc Loc;
  std::string Msg;
  raw_string_ostream MsgStream(Msg);

  // Context will be nullptr for IR input files; in that case we construct the
  // remark message from llvm::DiagnosticInfoOptimizationBase.
  if (Context != nullptr) {
    Loc = getBestLocationFromDebugLoc(D, BadDebugInfo, Filename, Line, Column);
    MsgStream << D.getMsg();
  } else {
    DiagnosticPrinterRawOStream DP(MsgStream);
    D.print(DP);
  }

  if (D.getHotness())
    MsgStream << " (hotness: " << *D.getHotness() << ")";

  Diags.Report(Loc, DiagID)
      << AddFlagValue(D.getPassName())
      << MsgStream.str();

  if (BadDebugInfo)
    // If we were not able to translate the file:line:col information
    // back to a SourceLocation, at least emit a note stating that
    // we could not translate this location. This can happen in the
    // case of #line directives.
    Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
        << Filename << Line << Column;
}

void BackendConsumer::OptimizationRemarkHandler(
    const llvm::DiagnosticInfoOptimizationBase &D) {
  // Without hotness information, don't show noisy remarks.
  if (D.isVerbose() && !D.getHotness())
    return;

  if (D.isPassed()) {
    // Optimization remarks are active only if the -Rpass flag has a regular
    // expression that matches the name of the pass in \p D.
    if (CodeGenOpts.OptimizationRemark.patternMatches(D.getPassName()))
      EmitOptimizationMessage(D, diag::remark_fe_backend_optimization_remark);
  } else if (D.isMissed()) {
    // Missed optimization remarks are active only if the -Rpass-missed flag
    // has a regular expression that matches the name of the pass in \p D.
    if (CodeGenOpts.OptimizationRemarkMissed.patternMatches(D.getPassName()))
      EmitOptimizationMessage(
          D, diag::remark_fe_backend_optimization_remark_missed);
  } else {
    assert(D.isAnalysis() && "Unknown remark type");

    bool ShouldAlwaysPrint = false;
    if (auto *ORA = dyn_cast<llvm::OptimizationRemarkAnalysis>(&D))
      ShouldAlwaysPrint = ORA->shouldAlwaysPrint();

    if (ShouldAlwaysPrint ||
        CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(D.getPassName()))
      EmitOptimizationMessage(
          D, diag::remark_fe_backend_optimization_remark_analysis);
  }
}
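
// As a usage sketch (flag spellings assumed from common clang documentation):
// building with `clang -O2 -Rpass=loop-vectorize x.c` should route "passed"
// vectorizer remarks through the isPassed() branch above, while
// -Rpass-missed= and -Rpass-analysis= feed the other two branches; the
// regular expression given to each flag is what patternMatches() tests
// against the pass name.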

void BackendConsumer::OptimizationRemarkHandler(
    const llvm::OptimizationRemarkAnalysisFPCommute &D) {
  // Optimization analysis remarks are active if the pass name is set to
  // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
  // regular expression that matches the name of the pass in \p D.
  if (D.shouldAlwaysPrint() ||
      CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(D.getPassName()))
    EmitOptimizationMessage(
        D, diag::remark_fe_backend_optimization_remark_analysis_fpcommute);
}

void BackendConsumer::OptimizationRemarkHandler(
    const llvm::OptimizationRemarkAnalysisAliasing &D) {
  // Optimization analysis remarks are active if the pass name is set to
  // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
  // regular expression that matches the name of the pass in \p D.
  if (D.shouldAlwaysPrint() ||
      CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(D.getPassName()))
    EmitOptimizationMessage(
        D, diag::remark_fe_backend_optimization_remark_analysis_aliasing);
}
void BackendConsumer::OptimizationFailureHandler(
|
|
|
|
const llvm::DiagnosticInfoOptimizationFailure &D) {
|
|
|
|
EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure);
|
|
|
|
}
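
// Handle a 'dontcall' diagnostic from the backend: a call to a function
// marked with the error/warning attribute survived to code generation.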
void BackendConsumer::DontCallDiagHandler(const DiagnosticInfoDontCall &D) {
  SourceLocation LocCookie =
      SourceLocation::getFromRawEncoding(D.getLocCookie());

  // FIXME: we can't yet diagnose indirect calls. When/if we can, we
  // should instead assert that LocCookie.isValid().
  if (!LocCookie.isValid())
    return;

  Diags.Report(LocCookie, D.getSeverity() == DiagnosticSeverity::DS_Error
                              ? diag::err_fe_backend_error_attr
                              : diag::warn_fe_backend_warning_attr)
      << llvm::demangle(D.getFunctionName().str()) << D.getNote();
}

/// This function is invoked when the backend needs
/// to report something to the user.
void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
  unsigned DiagID = diag::err_fe_inline_asm;
  llvm::DiagnosticSeverity Severity = DI.getSeverity();
  // Get the diagnostic ID based on the kind of the diagnostic.
  switch (DI.getKind()) {
  case llvm::DK_InlineAsm:
    if (InlineAsmDiagHandler(cast<DiagnosticInfoInlineAsm>(DI)))
      return;
    ComputeDiagID(Severity, inline_asm, DiagID);
    break;
  case llvm::DK_SrcMgr:
    SrcMgrDiagHandler(cast<DiagnosticInfoSrcMgr>(DI));
    return;
  case llvm::DK_StackSize:
    if (StackSizeDiagHandler(cast<DiagnosticInfoStackSize>(DI)))
      return;
    ComputeDiagID(Severity, backend_frame_larger_than, DiagID);
    break;
  case DK_Linker:
    ComputeDiagID(Severity, linking_module, DiagID);
    break;
  case llvm::DK_OptimizationRemark:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<OptimizationRemark>(DI));
    return;
  case llvm::DK_OptimizationRemarkMissed:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<OptimizationRemarkMissed>(DI));
    return;
  case llvm::DK_OptimizationRemarkAnalysis:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<OptimizationRemarkAnalysis>(DI));
    return;
  case llvm::DK_OptimizationRemarkAnalysisFPCommute:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<OptimizationRemarkAnalysisFPCommute>(DI));
    return;
  case llvm::DK_OptimizationRemarkAnalysisAliasing:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<OptimizationRemarkAnalysisAliasing>(DI));
    return;
  case llvm::DK_MachineOptimizationRemark:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<MachineOptimizationRemark>(DI));
    return;
  case llvm::DK_MachineOptimizationRemarkMissed:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<MachineOptimizationRemarkMissed>(DI));
    return;
  case llvm::DK_MachineOptimizationRemarkAnalysis:
    // Optimization remarks are always handled completely by this
    // handler. There is no generic way of emitting them.
    OptimizationRemarkHandler(cast<MachineOptimizationRemarkAnalysis>(DI));
    return;
  case llvm::DK_OptimizationFailure:
    // Optimization failures are always handled completely by this
    // handler.
    OptimizationFailureHandler(cast<DiagnosticInfoOptimizationFailure>(DI));
    return;
  case llvm::DK_Unsupported:
    UnsupportedDiagHandler(cast<DiagnosticInfoUnsupported>(DI));
    return;
  case llvm::DK_DontCall:
    DontCallDiagHandler(cast<DiagnosticInfoDontCall>(DI));
    return;
  default:
    // Plugin IDs are not bound to any value as they are set dynamically.
    ComputeDiagRemarkID(Severity, backend_plugin, DiagID);
    break;
  }
  std::string MsgStorage;
  {
    raw_string_ostream Stream(MsgStorage);
    DiagnosticPrinterRawOStream DP(Stream);
    DI.print(DP);
  }

  if (DI.getKind() == DK_Linker) {
    assert(CurLinkModule && "CurLinkModule must be set for linker diagnostics");
    Diags.Report(DiagID) << CurLinkModule->getModuleIdentifier() << MsgStorage;
    return;
  }

  // Report the backend message using the usual diagnostic mechanism.
  FullSourceLoc Loc;
  Diags.Report(Loc, DiagID).AddString(MsgStorage);
}
#undef ComputeDiagID

CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
    : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext),
      OwnsVMContext(!_VMContext) {}

CodeGenAction::~CodeGenAction() {
  TheModule.reset();
  if (OwnsVMContext)
    delete VMContext;
}

bool CodeGenAction::hasIRSupport() const { return true; }

void CodeGenAction::EndSourceFileAction() {
  // If the consumer creation failed, do nothing.
  if (!getCompilerInstance().hasASTConsumer())
    return;

  // Steal the module from the consumer.
  TheModule = BEConsumer->takeModule();
}

std::unique_ptr<llvm::Module> CodeGenAction::takeModule() {
  return std::move(TheModule);
}

llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
  OwnsVMContext = false;
  return VMContext;
}

CodeGenerator *CodeGenAction::getCodeGenerator() const {
  return BEConsumer->getCodeGenerator();
}
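
/// Pick the default output stream for the given backend action: a file with
/// the conventional extension for that output kind, a null stream, or no
/// stream at all.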
static std::unique_ptr<raw_pwrite_stream>
GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) {
  switch (Action) {
  case Backend_EmitAssembly:
    return CI.createDefaultOutputFile(false, InFile, "s");
  case Backend_EmitLL:
    return CI.createDefaultOutputFile(false, InFile, "ll");
  case Backend_EmitBC:
    return CI.createDefaultOutputFile(true, InFile, "bc");
  case Backend_EmitNothing:
    return nullptr;
  case Backend_EmitMCNull:
    return CI.createNullOutputFile();
  case Backend_EmitObj:
    return CI.createDefaultOutputFile(true, InFile, "o");
  }

  llvm_unreachable("Invalid action!");
}

std::unique_ptr<ASTConsumer>
CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
  BackendAction BA = static_cast<BackendAction>(Act);
  std::unique_ptr<raw_pwrite_stream> OS = CI.takeOutputStream();
  if (!OS)
    OS = GetOutputStream(CI, InFile, BA);

  if (BA != Backend_EmitNothing && !OS)
    return nullptr;

  // Load bitcode modules to link with, if we need to.
  if (LinkModules.empty())
    for (const CodeGenOptions::BitcodeFileToLink &F :
         CI.getCodeGenOpts().LinkBitcodeFiles) {
      auto BCBuf = CI.getFileManager().getBufferForFile(F.Filename);
      if (!BCBuf) {
        CI.getDiagnostics().Report(diag::err_cannot_open_file)
            << F.Filename << BCBuf.getError().message();
        LinkModules.clear();
        return nullptr;
      }

      Expected<std::unique_ptr<llvm::Module>> ModuleOrErr =
          getOwningLazyBitcodeModule(std::move(*BCBuf), *VMContext);
      if (!ModuleOrErr) {
        handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
          CI.getDiagnostics().Report(diag::err_cannot_open_file)
              << F.Filename << EIB.message();
        });
        LinkModules.clear();
        return nullptr;
      }
      LinkModules.push_back({std::move(ModuleOrErr.get()), F.PropagateAttrs,
                             F.Internalize, F.LinkFlags});
    }

  CoverageSourceInfo *CoverageInfo = nullptr;
  // Add the preprocessor callback only when the coverage mapping is generated.
  if (CI.getCodeGenOpts().CoverageMapping)
    CoverageInfo = CodeGen::CoverageMappingModuleGen::setUpCoverageCallbacks(
        CI.getPreprocessor());
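
  // Create the BackendConsumer, which wraps the CodeGenerator that builds the
  // llvm::Module and later runs the backend over it.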
  std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
      BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
      CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
      CI.getLangOpts(), std::string(InFile), std::move(LinkModules),
      std::move(OS), *VMContext, CoverageInfo));
  BEConsumer = Result.get();

  // Enable generating macro debug info only when debug info is not disabled
  // and also macro debug info is enabled.
  if (CI.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo &&
      CI.getCodeGenOpts().MacroDebugInfo) {
    std::unique_ptr<PPCallbacks> Callbacks =
        std::make_unique<MacroPPCallbacks>(BEConsumer->getCodeGenerator(),
                                           CI.getPreprocessor());
    CI.getPreprocessor().addPPCallbacks(std::move(Callbacks));
  }

  return std::move(Result);
}
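
/// Load an LLVM module from \p MBRef. The buffer may hold textual IR, regular
/// bitcode, or a multi-module bitcode file produced for a ThinLTO backend
/// invocation.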
std::unique_ptr<llvm::Module>
CodeGenAction::loadModule(MemoryBufferRef MBRef) {
  CompilerInstance &CI = getCompilerInstance();
  SourceManager &SM = CI.getSourceManager();

  // For ThinLTO backend invocations, ensure that the context
  // merges types based on ODR identifiers. We also need to read
  // the correct module out of a multi-module bitcode file.
  if (!CI.getCodeGenOpts().ThinLTOIndexFile.empty()) {
    VMContext->enableDebugTypeODRUniquing();

    auto DiagErrors = [&](Error E) -> std::unique_ptr<llvm::Module> {
      unsigned DiagID =
          CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
      handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
        CI.getDiagnostics().Report(DiagID) << EIB.message();
      });
      return {};
    };

    Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
    if (!BMsOrErr)
      return DiagErrors(BMsOrErr.takeError());
    BitcodeModule *Bm = llvm::lto::findThinLTOModule(*BMsOrErr);
    // We have nothing to do if the file contains no ThinLTO module. This is
    // possible if ThinLTO compilation was not able to split the module. The
    // content of the file was already processed by indexing and will be
    // passed to the linker using the merged object file.
    if (!Bm) {
      auto M = std::make_unique<llvm::Module>("empty", *VMContext);
      M->setTargetTriple(CI.getTargetOpts().Triple);
      return M;
    }
    Expected<std::unique_ptr<llvm::Module>> MOrErr =
        Bm->parseModule(*VMContext);
    if (!MOrErr)
      return DiagErrors(MOrErr.takeError());
    return std::move(*MOrErr);
  }
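
  // Otherwise the input is plain LLVM IR or bitcode; parse it directly into
  // this action's LLVMContext.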
  llvm::SMDiagnostic Err;
  if (std::unique_ptr<llvm::Module> M = parseIR(MBRef, Err, *VMContext))
    return M;

  // Translate from the diagnostic info to the SourceManager location if
  // available.
  // TODO: Unify this with ConvertBackendLocation()
  SourceLocation Loc;
  if (Err.getLineNo() > 0) {
    assert(Err.getColumnNo() >= 0);
    Loc = SM.translateFileLineCol(SM.getFileEntryForID(SM.getMainFileID()),
                                  Err.getLineNo(), Err.getColumnNo() + 1);
  }

  // Strip off a leading diagnostic code if there is one.
  StringRef Msg = Err.getMessage();
  if (Msg.startswith("error: "))
    Msg = Msg.substr(7);

  unsigned DiagID =
      CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");

  CI.getDiagnostics().Report(Loc, DiagID) << Msg;
  return {};
}
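
/// For non-IR inputs, defer to the regular AST-driven pipeline; LLVM IR
/// inputs are loaded here and handed directly to the backend.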
void CodeGenAction::ExecuteAction() {
  if (getCurrentFileKind().getLanguage() != Language::LLVM_IR) {
    this->ASTFrontendAction::ExecuteAction();
    return;
  }

  // If this is an IR file, we have to treat it specially.
  BackendAction BA = static_cast<BackendAction>(Act);
  CompilerInstance &CI = getCompilerInstance();
  auto &CodeGenOpts = CI.getCodeGenOpts();
  auto &Diagnostics = CI.getDiagnostics();
  std::unique_ptr<raw_pwrite_stream> OS =
      GetOutputStream(CI, getCurrentFile(), BA);
  if (BA != Backend_EmitNothing && !OS)
    return;

  SourceManager &SM = CI.getSourceManager();
  FileID FID = SM.getMainFileID();
  Optional<MemoryBufferRef> MainFile = SM.getBufferOrNone(FID);
  if (!MainFile)
    return;

  TheModule = loadModule(*MainFile);
  if (!TheModule)
    return;

  const TargetOptions &TargetOpts = CI.getTargetOpts();
  if (TheModule->getTargetTriple() != TargetOpts.Triple) {
    Diagnostics.Report(SourceLocation(), diag::warn_fe_override_module)
        << TargetOpts.Triple;
    TheModule->setTargetTriple(TargetOpts.Triple);
  }

  EmbedObject(TheModule.get(), CodeGenOpts, Diagnostics);
  EmbedBitcode(TheModule.get(), CodeGenOpts, *MainFile);

  LLVMContext &Ctx = TheModule->getContext();

  // Restore any diagnostic handler previously set before returning from this
  // function.
  struct RAII {
    LLVMContext &Ctx;
    std::unique_ptr<DiagnosticHandler> PrevHandler = Ctx.getDiagnosticHandler();
    ~RAII() { Ctx.setDiagnosticHandler(std::move(PrevHandler)); }
  } _{Ctx};

  // Set the clang diagnostic handler. To do this we need to create a fake
  // BackendConsumer.
  BackendConsumer Result(BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
                         CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
                         CI.getTargetOpts(), CI.getLangOpts(), TheModule.get(),
                         std::move(LinkModules), *VMContext, nullptr);
  // PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
  // true here because the value names are needed for reading textual IR.
  Ctx.setDiscardValueNames(false);
  Ctx.setDiagnosticHandler(
      std::make_unique<ClangDiagnosticHandler>(CodeGenOpts, &Result));
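
  // Set up the output file for serialized optimization remarks, if one was
  // requested.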
  Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
      setupLLVMOptimizationRemarks(
          Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
          CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
          CodeGenOpts.DiagnosticsHotnessThreshold);

  if (Error E = OptRecordFileOrErr.takeError()) {
    reportOptRecordError(std::move(E), Diagnostics, CodeGenOpts);
    return;
  }
  std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
      std::move(*OptRecordFileOrErr);

  EmitBackendOutput(Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts,
                    TargetOpts, CI.getLangOpts(),
                    CI.getTarget().getDataLayoutString(), TheModule.get(), BA,
                    std::move(OS));
  if (OptRecordFile)
    OptRecordFile->keep();
}

//
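
// Out-of-line anchor methods pin each action's vtable to this file.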
void EmitAssemblyAction::anchor() { }
EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
    : CodeGenAction(Backend_EmitAssembly, _VMContext) {}

void EmitBCAction::anchor() { }
EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
    : CodeGenAction(Backend_EmitBC, _VMContext) {}

void EmitLLVMAction::anchor() { }
EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
    : CodeGenAction(Backend_EmitLL, _VMContext) {}

void EmitLLVMOnlyAction::anchor() { }
EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
    : CodeGenAction(Backend_EmitNothing, _VMContext) {}

void EmitCodeGenOnlyAction::anchor() { }
EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
    : CodeGenAction(Backend_EmitMCNull, _VMContext) {}

void EmitObjAction::anchor() { }
EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
    : CodeGenAction(Backend_EmitObj, _VMContext) {}