//===--- Driver.cpp - Clang GCC Compatible Driver -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "clang/Driver/Driver.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "InputInfo.h"
|
|
|
|
#include "ToolChains.h"
|
|
|
|
#include "clang/Basic/Version.h"
|
2014-06-04 11:28:55 +08:00
|
|
|
#include "clang/Config/config.h"
|
2009-03-12 15:58:46 +08:00
|
|
|
#include "clang/Driver/Action.h"
|
2009-03-05 04:49:20 +08:00
|
|
|
#include "clang/Driver/Compilation.h"
|
2009-03-12 16:55:43 +08:00
|
|
|
#include "clang/Driver/DriverDiagnostic.h"
|
2009-03-16 14:56:51 +08:00
|
|
|
#include "clang/Driver/Job.h"
|
2009-03-05 04:49:20 +08:00
|
|
|
#include "clang/Driver/Options.h"
|
2015-02-21 04:30:56 +08:00
|
|
|
#include "clang/Driver/SanitizerArgs.h"
|
2009-03-16 14:56:51 +08:00
|
|
|
#include "clang/Driver/Tool.h"
|
|
|
|
#include "clang/Driver/ToolChain.h"
|
2011-03-23 12:04:01 +08:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
2013-07-27 08:23:45 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2014-06-19 01:21:50 +08:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "llvm/ADT/StringSet.h"
|
2013-07-19 04:29:38 +08:00
|
|
|
#include "llvm/ADT/StringSwitch.h"
|
2013-06-15 01:17:23 +08:00
|
|
|
#include "llvm/Option/Arg.h"
|
|
|
|
#include "llvm/Option/ArgList.h"
|
2014-01-07 19:51:46 +08:00
|
|
|
#include "llvm/Option/OptSpecifier.h"
|
2013-06-15 01:17:23 +08:00
|
|
|
#include "llvm/Option/OptTable.h"
|
|
|
|
#include "llvm/Option/Option.h"
|
2013-02-05 15:29:57 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2011-09-23 13:57:42 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2010-12-18 05:22:22 +08:00
|
|
|
#include "llvm/Support/FileSystem.h"
|
2010-11-30 02:12:39 +08:00
|
|
|
#include "llvm/Support/Path.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "llvm/Support/PrettyStackTrace.h"
|
2014-06-19 01:21:50 +08:00
|
|
|
#include "llvm/Support/Process.h"
|
2010-11-30 02:12:39 +08:00
|
|
|
#include "llvm/Support/Program.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2012-02-02 08:40:14 +08:00
|
|
|
#include <map>
|
2014-03-09 19:36:40 +08:00
|
|
|
#include <memory>
|
2012-02-01 22:25:28 +08:00
|
|
|
|
2009-03-05 04:49:20 +08:00
|
|
|
using namespace clang::driver;
|
2009-03-26 13:56:24 +08:00
|
|
|
using namespace clang;
|
2013-06-15 01:17:23 +08:00
|
|
|
using namespace llvm::opt;
|
2009-03-05 04:49:20 +08:00
|
|
|
|
Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
               DiagnosticsEngine &Diags)
    : Opts(createDriverOptTable()), Diags(Diags), Mode(GCCMode),
      SaveTemps(SaveTempsNone), ClangExecutable(ClangExecutable),
      SysRoot(DEFAULT_SYSROOT), UseStdLib(true),
      DefaultTargetTriple(DefaultTargetTriple),
      DriverTitle("clang LLVM compiler"), CCPrintOptionsFilename(nullptr),
      CCPrintHeadersFilename(nullptr), CCLogDiagnosticsFilename(nullptr),
      CCCPrintBindings(false), CCPrintHeaders(false), CCLogDiagnostics(false),
      CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true),
      CCCUsePCH(true), SuppressMissingInputWarning(false) {

  Name = llvm::sys::path::filename(ClangExecutable);
  Dir = llvm::sys::path::parent_path(ClangExecutable);

  // Compute the path to the resource directory.
  StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
  SmallString<128> P(Dir);
  if (ClangResourceDir != "") {
    llvm::sys::path::append(P, ClangResourceDir);
  } else {
    StringRef ClangLibdirSuffix(CLANG_LIBDIR_SUFFIX);
    llvm::sys::path::append(P, "..", Twine("lib") + ClangLibdirSuffix, "clang",
                            CLANG_VERSION_STRING);
  }
  ResourceDir = P.str();
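  // For example, with the driver installed as /usr/local/bin/clang and no
  // CLANG_RESOURCE_DIR override, this typically ends up pointing at
  // /usr/local/lib/clang/<version> (assuming an empty CLANG_LIBDIR_SUFFIX);
  // the path shown here is purely illustrative.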
}

Driver::~Driver() {
  delete Opts;

  llvm::DeleteContainerSeconds(ToolChains);
}

void Driver::ParseDriverMode(ArrayRef<const char *> Args) {
  const std::string OptName =
      getOpts().getOption(options::OPT_driver_mode).getPrefixedName();

  for (const char *ArgPtr : Args) {
    // Ignore nullptrs; they are response file EOL markers.
    if (ArgPtr == nullptr)
      continue;
    const StringRef Arg = ArgPtr;
    if (!Arg.startswith(OptName))
      continue;

    const StringRef Value = Arg.drop_front(OptName.size());
    const unsigned M = llvm::StringSwitch<unsigned>(Value)
                           .Case("gcc", GCCMode)
                           .Case("g++", GXXMode)
                           .Case("cpp", CPPMode)
                           .Case("cl", CLMode)
                           .Default(~0U);
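    // For example, an invocation such as "clang --driver-mode=g++ foo.cpp"
    // selects GXXMode here, while an unrecognized value like
    // "--driver-mode=fortran" falls through to the diagnostic below.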

    if (M != ~0U)
      Mode = static_cast<DriverMode>(M);
    else
      Diag(diag::err_drv_unsupported_option_argument) << OptName << Value;
  }
}

InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings) {
  llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");

  unsigned IncludedFlagsBitmask;
  unsigned ExcludedFlagsBitmask;
  std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
      getIncludeExcludeOptionFlagMasks();

  unsigned MissingArgIndex, MissingArgCount;
  InputArgList Args =
      getOpts().ParseArgs(ArgStrings, MissingArgIndex, MissingArgCount,
                          IncludedFlagsBitmask, ExcludedFlagsBitmask);

  // Check for missing argument error.
  if (MissingArgCount)
    Diag(clang::diag::err_drv_missing_argument)
        << Args.getArgString(MissingArgIndex) << MissingArgCount;

  // Check for unsupported options.
  for (const Arg *A : Args) {
    if (A->getOption().hasFlag(options::Unsupported)) {
      Diag(clang::diag::err_drv_unsupported_opt) << A->getAsString(Args);
      continue;
    }

    // Warn about -mcpu= without an argument.
    if (A->getOption().matches(options::OPT_mcpu_EQ) && A->containsValue("")) {
      Diag(clang::diag::warn_drv_empty_joined_argument) << A->getAsString(Args);
    }
  }

  for (const Arg *A : Args.filtered(options::OPT_UNKNOWN))
    Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(Args);

  return Args;
}

// Determine which compilation mode we are in. We look for options which
// affect the phase, starting with the earliest phases, and record which
// option we used to determine the final phase.
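// For example, "clang -E foo.c" stops after preprocessing, "clang -c foo.c"
// stops after assembling into an object file, and plain "clang foo.c" runs
// all phases through linking.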
phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
                                 Arg **FinalPhaseArg) const {
  Arg *PhaseArg = nullptr;
  phases::ID FinalPhase;

  // -{E,EP,P,M,MM} only run the preprocessor.
  if (CCCIsCPP() || (PhaseArg = DAL.getLastArg(options::OPT_E)) ||
      (PhaseArg = DAL.getLastArg(options::OPT__SLASH_EP)) ||
      (PhaseArg = DAL.getLastArg(options::OPT_M, options::OPT_MM)) ||
      (PhaseArg = DAL.getLastArg(options::OPT__SLASH_P))) {
    FinalPhase = phases::Preprocess;

    // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
  } else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
             (PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
             (PhaseArg = DAL.getLastArg(options::OPT_verify_pch)) ||
             (PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
             (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
             (PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
             (PhaseArg = DAL.getLastArg(options::OPT__analyze,
                                        options::OPT__analyze_auto)) ||
             (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
    FinalPhase = phases::Compile;

    // -S only runs up to the backend.
  } else if ((PhaseArg = DAL.getLastArg(options::OPT_S))) {
    FinalPhase = phases::Backend;

    // -c and partial CUDA compilations only run up to the assembler.
  } else if ((PhaseArg = DAL.getLastArg(options::OPT_c)) ||
             (PhaseArg = DAL.getLastArg(options::OPT_cuda_device_only)) ||
             (PhaseArg = DAL.getLastArg(options::OPT_cuda_host_only))) {
    FinalPhase = phases::Assemble;

    // Otherwise do everything.
  } else
    FinalPhase = phases::Link;

  if (FinalPhaseArg)
    *FinalPhaseArg = PhaseArg;

  return FinalPhase;
}

static Arg *MakeInputArg(DerivedArgList &Args, OptTable *Opts,
                         StringRef Value) {
  Arg *A = new Arg(Opts->getOption(options::OPT_INPUT), Value,
                   Args.getBaseArgs().MakeIndex(Value), Value.data());
  Args.AddSynthesizedArg(A);
  A->claim();
  return A;
}

DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
  DerivedArgList *DAL = new DerivedArgList(Args);

  bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
  for (Arg *A : Args) {
    // Unfortunately, we have to parse some forwarding options (-Xassembler,
    // -Xlinker, -Xpreprocessor) because we either integrate their functionality
    // (assembler and preprocessor), or bypass a previous driver ('collect2').

    // Rewrite linker options, to replace --no-demangle with a custom internal
    // option.
    if ((A->getOption().matches(options::OPT_Wl_COMMA) ||
         A->getOption().matches(options::OPT_Xlinker)) &&
        A->containsValue("--no-demangle")) {
      // Add the rewritten no-demangle argument.
      DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_Xlinker__no_demangle));

      // Add the remaining values as Xlinker arguments.
      for (const StringRef Val : A->getValues())
        if (Val != "--no-demangle")
          DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker), Val);

      continue;
    }

    // Rewrite preprocessor options, to replace -Wp,-MD,FOO which is used by
    // some build systems. We don't try to be complete here because we don't
    // care to encourage this usage model.
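    // For example, "-Wp,-MD,foo.d" is rewritten to "-MD" plus "-MF foo.d",
    // matching what the corresponding driver-level options would produce.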
    if (A->getOption().matches(options::OPT_Wp_COMMA) &&
        (A->getValue(0) == StringRef("-MD") ||
         A->getValue(0) == StringRef("-MMD"))) {
      // Rewrite to -MD/-MMD along with -MF.
      if (A->getValue(0) == StringRef("-MD"))
        DAL->AddFlagArg(A, Opts->getOption(options::OPT_MD));
      else
        DAL->AddFlagArg(A, Opts->getOption(options::OPT_MMD));
      if (A->getNumValues() == 2)
        DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF),
                            A->getValue(1));
      continue;
    }

    // Rewrite reserved library names.
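    // For example, "-lstdc++" (unless -nostdlib is given) and "-lcc_kext" are
    // turned into internal flags so each toolchain can substitute the library
    // it actually wants to link.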
    if (A->getOption().matches(options::OPT_l)) {
      StringRef Value = A->getValue();

      // Rewrite unless -nostdlib is present.
      if (!HasNostdlib && Value == "stdc++") {
        DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_stdcxx));
        continue;
      }

      // Rewrite unconditionally.
      if (Value == "cc_kext") {
        DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_cckext));
        continue;
      }
    }

    // Pick up inputs via the -- option.
    if (A->getOption().matches(options::OPT__DASH_DASH)) {
      A->claim();
      for (const StringRef Val : A->getValues())
        DAL->append(MakeInputArg(*DAL, Opts, Val));
      continue;
    }

    DAL->append(A);
  }

  // Add a default value of -mlinker-version=, if the host linker version was
  // detected at configure time (HOST_LINK_VERSION) and the user didn't specify
  // one.
#if defined(HOST_LINK_VERSION)
  if (!Args.hasArg(options::OPT_mlinker_version_EQ) &&
      strlen(HOST_LINK_VERSION) > 0) {
    DAL->AddJoinedArg(0, Opts->getOption(options::OPT_mlinker_version_EQ),
                      HOST_LINK_VERSION);
    DAL->getLastArg(options::OPT_mlinker_version_EQ)->claim();
  }
#endif

  return DAL;
}

/// \brief Compute target triple from args.
///
/// This routine provides the logic to compute a target triple from various
/// args passed to the driver and the default triple string.
static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
                                        const ArgList &Args,
                                        StringRef DarwinArchName = "") {
  // FIXME: Already done in Compilation *Driver::BuildCompilation
  if (const Arg *A = Args.getLastArg(options::OPT_target))
    DefaultTargetTriple = A->getValue();

  llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));

  // Handle Apple-specific options available here.
  if (Target.isOSBinFormatMachO()) {
    // If an explicit Darwin arch name is given, that trumps all.
    if (!DarwinArchName.empty()) {
      tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName);
      return Target;
    }

    // Handle the Darwin '-arch' flag.
    if (Arg *A = Args.getLastArg(options::OPT_arch)) {
      StringRef ArchName = A->getValue();
      tools::darwin::setTripleTypeForMachOArchName(Target, ArchName);
    }
  }

  // Handle pseudo-target flags '-mlittle-endian'/'-EL' and
  // '-mbig-endian'/'-EB'.
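  // For example, "-mbig-endian" on an aarch64 default triple selects the
  // aarch64_be variant, assuming the architecture defines a big-endian
  // counterpart; otherwise the triple is left unchanged.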
  if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
                               options::OPT_mbig_endian)) {
    if (A->getOption().matches(options::OPT_mlittle_endian)) {
      llvm::Triple LE = Target.getLittleEndianArchVariant();
      if (LE.getArch() != llvm::Triple::UnknownArch)
        Target = std::move(LE);
    } else {
      llvm::Triple BE = Target.getBigEndianArchVariant();
      if (BE.getArch() != llvm::Triple::UnknownArch)
        Target = std::move(BE);
    }
  }

  // Skip further flag support on OSes which don't support '-m32' or '-m64'.
  if (Target.getArchName() == "tce" || Target.getOS() == llvm::Triple::Minix)
    return Target;

  // Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
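  // For example, "-m32" on an x86_64-unknown-linux-gnu default triple picks
  // the 32-bit variant (i386-unknown-linux-gnu), while "-mx32" keeps the
  // 64-bit architecture but switches the environment to GNUX32.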
  if (Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_mx32,
                               options::OPT_m32, options::OPT_m16)) {
    llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;

    if (A->getOption().matches(options::OPT_m64)) {
      AT = Target.get64BitArchVariant().getArch();
      if (Target.getEnvironment() == llvm::Triple::GNUX32)
        Target.setEnvironment(llvm::Triple::GNU);
    } else if (A->getOption().matches(options::OPT_mx32) &&
               Target.get64BitArchVariant().getArch() == llvm::Triple::x86_64) {
      AT = llvm::Triple::x86_64;
      Target.setEnvironment(llvm::Triple::GNUX32);
    } else if (A->getOption().matches(options::OPT_m32)) {
      AT = Target.get32BitArchVariant().getArch();
      if (Target.getEnvironment() == llvm::Triple::GNUX32)
        Target.setEnvironment(llvm::Triple::GNU);
    } else if (A->getOption().matches(options::OPT_m16) &&
               Target.get32BitArchVariant().getArch() == llvm::Triple::x86) {
      AT = llvm::Triple::x86;
      Target.setEnvironment(llvm::Triple::CODE16);
    }

    if (AT != llvm::Triple::UnknownArch && AT != Target.getArch())
      Target.setArch(AT);
  }

  return Target;
}

Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
  llvm::PrettyStackTraceString CrashInfo("Compilation construction");

  // FIXME: Handle environment options which affect driver behavior, somewhere
  // (client?). GCC_EXEC_PREFIX, LPATH, CC_PRINT_OPTIONS.

  if (char *env = ::getenv("COMPILER_PATH")) {
    StringRef CompilerPath = env;
    while (!CompilerPath.empty()) {
      std::pair<StringRef, StringRef> Split =
          CompilerPath.split(llvm::sys::EnvPathSeparator);
      PrefixDirs.push_back(Split.first);
      CompilerPath = Split.second;
    }
  }

  // We look for the driver mode option early, because the mode can affect
  // how other options are parsed.
  ParseDriverMode(ArgList.slice(1));
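  // For example, "--driver-mode=cl" has to be seen before the remaining
  // arguments are parsed so that CL-style options (e.g. "/W3") can be
  // recognized by the option table.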

  // FIXME: What are we going to do with -V and -b?

  // FIXME: This stuff needs to go into the Compilation, not the driver.
  bool CCCPrintPhases;

  InputArgList Args = ParseArgStrings(ArgList.slice(1));

  // Silence driver warnings if requested
  Diags.setIgnoreAllWarnings(Args.hasArg(options::OPT_w));

  // -no-canonical-prefixes is used very early in main.
  Args.ClaimAllArgs(options::OPT_no_canonical_prefixes);

  // Ignore -pipe.
  Args.ClaimAllArgs(options::OPT_pipe);

  // Extract -ccc args.
  //
  // FIXME: We need to figure out where this behavior should live. Most of it
  // should be outside in the client; the parts that aren't should have proper
  // options, either by introducing new ones or by overloading gcc ones like -V
  // or -b.
  CCCPrintPhases = Args.hasArg(options::OPT_ccc_print_phases);
  CCCPrintBindings = Args.hasArg(options::OPT_ccc_print_bindings);
  if (const Arg *A = Args.getLastArg(options::OPT_ccc_gcc_name))
    CCCGenericGCCName = A->getValue();
  CCCUsePCH =
      Args.hasFlag(options::OPT_ccc_pch_is_pch, options::OPT_ccc_pch_is_pth);
  // FIXME: DefaultTargetTriple is used by the target-prefixed calls to as/ld
  // and getToolChain is const.
  if (IsCLMode()) {
    // clang-cl targets MSVC-style Win32.
    llvm::Triple T(DefaultTargetTriple);
    T.setOS(llvm::Triple::Win32);
    T.setEnvironment(llvm::Triple::MSVC);
    DefaultTargetTriple = T.str();
  }
  if (const Arg *A = Args.getLastArg(options::OPT_target))
    DefaultTargetTriple = A->getValue();
  if (const Arg *A = Args.getLastArg(options::OPT_ccc_install_dir))
    Dir = InstalledDir = A->getValue();
  for (const Arg *A : Args.filtered(options::OPT_B)) {
    A->claim();
    PrefixDirs.push_back(A->getValue(0));
  }
  if (const Arg *A = Args.getLastArg(options::OPT__sysroot_EQ))
    SysRoot = A->getValue();
  if (const Arg *A = Args.getLastArg(options::OPT__dyld_prefix_EQ))
    DyldPrefix = A->getValue();
  if (Args.hasArg(options::OPT_nostdlib))
    UseStdLib = false;

  if (const Arg *A = Args.getLastArg(options::OPT_resource_dir))
    ResourceDir = A->getValue();

  if (const Arg *A = Args.getLastArg(options::OPT_save_temps_EQ)) {
    SaveTemps = llvm::StringSwitch<SaveTempsMode>(A->getValue())
                    .Case("cwd", SaveTempsCwd)
                    .Case("obj", SaveTempsObj)
                    .Default(SaveTempsCwd);
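    // For example, "-save-temps=obj" keeps intermediate files next to the
    // output object, while plain "-save-temps" behaves like "=cwd" and leaves
    // them in the current working directory.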
  }

  std::unique_ptr<llvm::opt::InputArgList> UArgs =
      llvm::make_unique<InputArgList>(std::move(Args));

  // Perform the default argument translations.
  DerivedArgList *TranslatedArgs = TranslateInputArgs(*UArgs);

  // Owned by the host.
  const ToolChain &TC =
      getToolChain(*UArgs, computeTargetTriple(DefaultTargetTriple, *UArgs));

  // The compilation takes ownership of Args.
  Compilation *C = new Compilation(*this, TC, UArgs.release(), TranslatedArgs);

  if (!HandleImmediateArgs(*C))
    return C;

  // Construct the list of inputs.
  InputList Inputs;
  BuildInputs(C->getDefaultToolChain(), *TranslatedArgs, Inputs);

  // Construct the list of abstract actions to perform for this compilation. On
  // MachO targets this uses the driver-driver and universal actions.
  if (TC.getTriple().isOSBinFormatMachO())
    BuildUniversalActions(C->getDefaultToolChain(), C->getArgs(), Inputs,
                          C->getActions());
  else
    BuildActions(C->getDefaultToolChain(), C->getArgs(), Inputs,
                 C->getActions());

  if (CCCPrintPhases) {
    PrintActions(*C);
    return C;
  }

  BuildJobs(*C);

  return C;
}

static void printArgList(raw_ostream &OS, const llvm::opt::ArgList &Args) {
  llvm::opt::ArgStringList ASL;
  for (const auto *A : Args)
    A->render(Args, ASL);

  for (auto I = ASL.begin(), E = ASL.end(); I != E; ++I) {
    if (I != ASL.begin())
      OS << ' ';
    Command::printArg(OS, *I, true);
  }
  OS << '\n';
}

// When clang crashes, produce diagnostic information including the fully
// preprocessed source file(s). Request that the developer attach the
// diagnostic information to a bug report.
void Driver::generateCompilationDiagnostics(Compilation &C,
                                            const Command &FailingCommand) {
  if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
    return;

  // Don't try to generate diagnostics for link or dsymutil jobs.
  if (FailingCommand.getCreator().isLinkJob() ||
      FailingCommand.getCreator().isDsymutilJob())
    return;

  // Print the version of the compiler.
  PrintVersion(C, llvm::errs());

  Diag(clang::diag::note_drv_command_failed_diag_msg)
      << "PLEASE submit a bug report to " BUG_REPORT_URL " and include the "
         "crash backtrace, preprocessed source, and associated run script.";

  // Suppress driver output and emit preprocessor output to temp file.
  Mode = CPPMode;
  CCGenDiagnostics = true;

  // Save the original job command(s).
  Command Cmd = FailingCommand;

  // Keep track of whether we produce any errors while trying to produce
  // preprocessed sources.
  DiagnosticErrorTrap Trap(Diags);

  // Suppress tool output.
  C.initCompilationForDiagnostics();

  // Construct the list of inputs.
  InputList Inputs;
  BuildInputs(C.getDefaultToolChain(), C.getArgs(), Inputs);

  for (InputList::iterator it = Inputs.begin(), ie = Inputs.end(); it != ie;) {
    bool IgnoreInput = false;

    // Ignore input from stdin or any inputs that cannot be preprocessed.
    // Check type first as not all linker inputs have a value.
    if (types::getPreprocessedType(it->first) == types::TY_INVALID) {
      IgnoreInput = true;
    } else if (!strcmp(it->second->getValue(), "-")) {
      Diag(clang::diag::note_drv_command_failed_diag_msg)
          << "Error generating preprocessed source(s) - "
             "ignoring input from stdin.";
      IgnoreInput = true;
    }

    if (IgnoreInput) {
      it = Inputs.erase(it);
      ie = Inputs.end();
    } else {
      ++it;
    }
  }

  if (Inputs.empty()) {
    Diag(clang::diag::note_drv_command_failed_diag_msg)
        << "Error generating preprocessed source(s) - "
           "no preprocessable inputs.";
    return;
  }

  // Don't attempt to generate preprocessed files if multiple -arch options are
  // used, unless they're all duplicates.
  llvm::StringSet<> ArchNames;
  for (const Arg *A : C.getArgs()) {
    if (A->getOption().matches(options::OPT_arch)) {
      StringRef ArchName = A->getValue();
      ArchNames.insert(ArchName);
    }
  }
  if (ArchNames.size() > 1) {
    Diag(clang::diag::note_drv_command_failed_diag_msg)
        << "Error generating preprocessed source(s) - cannot generate "
           "preprocessed source with multiple -arch options.";
    return;
  }

  // Construct the list of abstract actions to perform for this compilation. On
  // Darwin OSes this uses the driver-driver and builds universal actions.
  const ToolChain &TC = C.getDefaultToolChain();
  if (TC.getTriple().isOSBinFormatMachO())
    BuildUniversalActions(TC, C.getArgs(), Inputs, C.getActions());
  else
    BuildActions(TC, C.getArgs(), Inputs, C.getActions());

  BuildJobs(C);

  // If there were errors building the compilation, quit now.
  if (Trap.hasErrorOccurred()) {
    Diag(clang::diag::note_drv_command_failed_diag_msg)
        << "Error generating preprocessed source(s).";
    return;
  }

  // Generate preprocessed output.
  SmallVector<std::pair<int, const Command *>, 4> FailingCommands;
  C.ExecuteJobs(C.getJobs(), FailingCommands);

  // If any of the preprocessing commands failed, clean up and exit.
  if (!FailingCommands.empty()) {
    if (!isSaveTempsEnabled())
      C.CleanupFileList(C.getTempFiles(), true);

    Diag(clang::diag::note_drv_command_failed_diag_msg)
        << "Error generating preprocessed source(s).";
    return;
  }

  const ArgStringList &TempFiles = C.getTempFiles();
  if (TempFiles.empty()) {
    Diag(clang::diag::note_drv_command_failed_diag_msg)
        << "Error generating preprocessed source(s).";
    return;
  }

  Diag(clang::diag::note_drv_command_failed_diag_msg)
      << "\n********************\n\n"
         "PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
         "Preprocessed source(s) and associated run script(s) are located at:";

  SmallString<128> VFS;
  for (const char *TempFile : TempFiles) {
    Diag(clang::diag::note_drv_command_failed_diag_msg) << TempFile;
    if (StringRef(TempFile).endswith(".cache")) {
      // In some cases (modules) we'll dump extra data to help with reproducing
      // the crash into a directory next to the output.
      VFS = llvm::sys::path::filename(TempFile);
      llvm::sys::path::append(VFS, "vfs", "vfs.yaml");
    }
  }

  // Assume associated files are based off of the first temporary file.
  CrashReportInfo CrashInfo(TempFiles[0], VFS);

  std::string Script = CrashInfo.Filename.rsplit('.').first.str() + ".sh";
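  // For example, if the preprocessed source was written to
  // "/tmp/foo-1a2b3c.c", the generated run script becomes
  // "/tmp/foo-1a2b3c.sh" (the temp file name here is purely illustrative).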
  std::error_code EC;
  llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::F_Excl);
  if (EC) {
    Diag(clang::diag::note_drv_command_failed_diag_msg)
        << "Error generating run script: " + Script + " " + EC.message();
  } else {
    ScriptOS << "# Crash reproducer for " << getClangFullVersion() << "\n"
             << "# Driver args: ";
    printArgList(ScriptOS, C.getInputArgs());
    ScriptOS << "# Original command: ";
    Cmd.Print(ScriptOS, "\n", /*Quote=*/true);
    Cmd.Print(ScriptOS, "\n", /*Quote=*/true, &CrashInfo);
    Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
  }

  for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file,
                                            options::OPT_frewrite_map_file_EQ))
    Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();

  Diag(clang::diag::note_drv_command_failed_diag_msg)
      << "\n\n********************";
}

void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
  // Since argumentsFitWithinSystemLimits() may underestimate the system's
  // capacity if the tool does not support response files, there is a chance
  // that things will just work without a response file, so we silently just
  // skip it.
  if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
      llvm::sys::argumentsFitWithinSystemLimits(Cmd.getArguments()))
    return;

  std::string TmpName = GetTemporaryPath("response", "txt");
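  // Point the command at a temporary response file; when the job is executed,
  // its arguments are written into that file and the tool is invoked with an
  // @file reference instead of the full argument list.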
  Cmd.setResponseFile(
      C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str())));
}

int Driver::ExecuteCompilation(
    Compilation &C,
    SmallVectorImpl<std::pair<int, const Command *>> &FailingCommands) {
  // Just print if -### was present.
  if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
    C.getJobs().Print(llvm::errs(), "\n", true);
    return 0;
  }

  // If there were errors building the compilation, quit now.
  if (Diags.hasErrorOccurred())
    return 1;

  // Set up response file names for each command, if necessary
  for (auto &Job : C.getJobs())
    setUpResponseFiles(C, Job);

  C.ExecuteJobs(C.getJobs(), FailingCommands);
|
2009-09-09 07:36:43 +08:00
|
|
|
|
2009-07-02 04:03:04 +08:00
|
|
|
// Remove temp files.
|
|
|
|
C.CleanupFileList(C.getTempFiles());
|
|
|
|
|
2010-05-22 08:37:20 +08:00
|
|
|
// If the command succeeded, we are done.
|
2013-01-30 04:15:05 +08:00
|
|
|
if (FailingCommands.empty())
|
|
|
|
return 0;
|
2010-05-22 08:37:20 +08:00
|
|
|
|
2013-01-30 04:15:05 +08:00
|
|
|
// Otherwise, remove result files and print extra information about abnormal
|
|
|
|
// failures.
|
2015-06-26 02:51:37 +08:00
|
|
|
for (const auto &CmdPair : FailingCommands) {
|
|
|
|
int Res = CmdPair.first;
|
|
|
|
const Command *FailingCommand = CmdPair.second;
|
2009-07-02 04:03:04 +08:00
|
|
|
|
2013-01-30 04:15:05 +08:00
|
|
|
// Remove result files if we're not saving temps.
|
2015-02-03 06:41:48 +08:00
|
|
|
if (!isSaveTempsEnabled()) {
|
2013-01-30 04:15:05 +08:00
|
|
|
const JobAction *JA = cast<JobAction>(&FailingCommand->getSource());
|
|
|
|
C.CleanupFileMap(C.getResultFiles(), JA, true);
|
2011-11-21 08:01:05 +08:00
|
|
|
|
2013-01-30 04:15:05 +08:00
|
|
|
// Failure result files are valid unless we crashed.
|
|
|
|
if (Res < 0)
|
|
|
|
C.CleanupFileMap(C.getFailureResultFiles(), JA, true);
|
|
|
|
}
|
2009-07-02 04:03:04 +08:00
|
|
|
|
2013-01-30 04:15:05 +08:00
|
|
|
// Print extra information about abnormal failures, if possible.
|
|
|
|
//
|
|
|
|
// This is ad-hoc, but we don't want to be excessively noisy. If the result
|
2014-06-27 04:59:36 +08:00
|
|
|
// status was 1, assume the command failed normally. In particular, if it
|
2013-01-30 04:15:05 +08:00
|
|
|
// was the compiler then assume it gave a reasonable error code. Failures
|
|
|
|
// in other tools are less common, and they generally have worse
|
|
|
|
// diagnostics, so always print the diagnostic there.
|
|
|
|
const Tool &FailingTool = FailingCommand->getCreator();
|
|
|
|
|
|
|
|
if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
|
|
|
|
// FIXME: See FIXME above regarding result code interpretation.
|
|
|
|
if (Res < 0)
|
|
|
|
Diag(clang::diag::err_drv_command_signalled)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< FailingTool.getShortName();
|
2013-01-30 04:15:05 +08:00
|
|
|
else
|
2015-06-26 23:47:46 +08:00
|
|
|
Diag(clang::diag::err_drv_command_failed) << FailingTool.getShortName()
|
|
|
|
<< Res;
|
2013-01-30 04:15:05 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
2009-07-02 04:03:04 +08:00
|
|
|
}
|
|
|
|
|
2009-04-16 00:34:29 +08:00
|
|
|
void Driver::PrintHelp(bool ShowHidden) const {
|
2013-07-27 08:23:45 +08:00
|
|
|
unsigned IncludedFlagsBitmask;
|
|
|
|
unsigned ExcludedFlagsBitmask;
|
2014-03-02 21:01:17 +08:00
|
|
|
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
|
2015-06-26 23:47:46 +08:00
|
|
|
getIncludeExcludeOptionFlagMasks();
|
2013-07-27 08:23:45 +08:00
|
|
|
|
|
|
|
ExcludedFlagsBitmask |= options::NoDriverOption;
|
|
|
|
if (!ShowHidden)
|
|
|
|
ExcludedFlagsBitmask |= HelpHidden;
|
|
|
|
|
|
|
|
getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
|
|
|
|
IncludedFlagsBitmask, ExcludedFlagsBitmask);
|
2009-04-01 05:38:17 +08:00
|
|
|
}
|
|
|
|
|
2011-07-23 18:55:15 +08:00
|
|
|
void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
|
2009-09-09 07:36:43 +08:00
|
|
|
// FIXME: The following handlers should use a callback mechanism, we don't
|
|
|
|
// know what the client would like to do.
|
2010-01-23 10:11:34 +08:00
|
|
|
OS << getClangFullVersion() << '\n';
|
2009-03-27 00:09:13 +08:00
|
|
|
const ToolChain &TC = C.getDefaultToolChain();
|
2009-07-22 04:06:58 +08:00
|
|
|
OS << "Target: " << TC.getTripleString() << '\n';
|
2009-06-17 07:32:58 +08:00
|
|
|
|
|
|
|
// Print the threading model.
|
2014-10-04 05:57:44 +08:00
|
|
|
if (Arg *A = C.getArgs().getLastArg(options::OPT_mthread_model)) {
|
|
|
|
// Don't print if the ToolChain would have barfed on it already
|
|
|
|
if (TC.isThreadModelSupported(A->getValue()))
|
|
|
|
OS << "Thread model: " << A->getValue();
|
|
|
|
} else
|
|
|
|
OS << "Thread model: " << TC.getThreadModel();
|
|
|
|
OS << '\n';
|
2009-03-13 08:51:18 +08:00
|
|
|
}
|
|
|
|
|
2010-05-05 13:53:24 +08:00
|
|
|
/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
|
|
|
|
/// option.
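/// Each category is printed as "<id>,<name>" on its own line, e.g.
/// (illustrative) "1,Lexical or Preprocessor Issue".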
|
2011-07-23 18:55:15 +08:00
|
|
|
static void PrintDiagnosticCategories(raw_ostream &OS) {
|
2011-05-25 13:05:01 +08:00
|
|
|
// Skip the empty category.
|
2015-06-26 23:47:46 +08:00
|
|
|
for (unsigned i = 1, max = DiagnosticIDs::getNumberOfCategories(); i != max;
|
|
|
|
++i)
|
2011-05-25 13:05:01 +08:00
|
|
|
OS << i << ',' << DiagnosticIDs::getCategoryNameFromID(i) << '\n';
|
2010-05-05 13:53:24 +08:00
|
|
|
}
|
|
|
|
|
2009-03-18 10:55:38 +08:00
|
|
|
bool Driver::HandleImmediateArgs(const Compilation &C) {
|
2010-06-12 06:00:19 +08:00
|
|
|
// The order these options are handled in gcc is all over the place, but we
|
2009-09-09 07:36:43 +08:00
|
|
|
// don't expect inconsistencies w.r.t. that to matter in practice.
|
2009-04-01 05:38:17 +08:00
|
|
|
|
2010-09-17 10:47:28 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_dumpmachine)) {
|
|
|
|
llvm::outs() << C.getDefaultToolChain().getTripleString() << '\n';
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-04-04 13:17:38 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_dumpversion)) {
|
2011-01-12 08:43:47 +08:00
|
|
|
// Since -dumpversion is only implemented for pedantic GCC compatibility, we
|
|
|
|
// return an answer which matches our definition of __VERSION__.
|
|
|
|
//
|
|
|
|
// If we want to return a more correct answer some day, then we should
|
|
|
|
// introduce a non-pedantically GCC compatible mode to Clang in which we
|
|
|
|
// provide sensible definitions for -dumpversion, __VERSION__, etc.
|
|
|
|
llvm::outs() << "4.2.1\n";
|
2009-04-04 13:17:38 +08:00
|
|
|
return false;
|
|
|
|
}
|
2010-06-15 05:23:12 +08:00
|
|
|
|
2010-05-05 13:53:24 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT__print_diagnostic_categories)) {
|
|
|
|
PrintDiagnosticCategories(llvm::outs());
|
|
|
|
return false;
|
|
|
|
}
|
2009-04-04 13:17:38 +08:00
|
|
|
|
2012-05-01 22:57:16 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_help) ||
|
2009-04-16 00:34:29 +08:00
|
|
|
C.getArgs().hasArg(options::OPT__help_hidden)) {
|
|
|
|
PrintHelp(C.getArgs().hasArg(options::OPT__help_hidden));
|
2009-04-01 05:38:17 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-04-02 23:05:41 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT__version)) {
|
2009-09-09 07:36:43 +08:00
|
|
|
// Follow gcc behavior and use stdout for --version and stderr for -v.
|
2009-07-22 04:06:58 +08:00
|
|
|
PrintVersion(C, llvm::outs());
|
2009-04-02 23:05:41 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_v) ||
|
2009-03-18 10:55:38 +08:00
|
|
|
C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
|
2009-07-22 04:06:58 +08:00
|
|
|
PrintVersion(C, llvm::errs());
|
2009-03-13 08:51:18 +08:00
|
|
|
SuppressMissingInputWarning = true;
|
|
|
|
}
|
|
|
|
|
2009-03-18 10:55:38 +08:00
|
|
|
const ToolChain &TC = C.getDefaultToolChain();
|
2013-07-31 01:57:09 +08:00
|
|
|
|
|
|
|
if (C.getArgs().hasArg(options::OPT_v))
|
|
|
|
TC.printVerboseInfo(llvm::errs());
|
|
|
|
|
2009-03-20 12:37:21 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_print_search_dirs)) {
|
|
|
|
llvm::outs() << "programs: =";
|
2015-06-24 06:43:50 +08:00
|
|
|
bool separator = false;
|
|
|
|
for (const std::string &Path : TC.getProgramPaths()) {
|
2015-06-26 23:47:46 +08:00
|
|
|
if (separator)
|
|
|
|
llvm::outs() << ':';
|
2015-06-24 06:43:50 +08:00
|
|
|
llvm::outs() << Path;
|
|
|
|
separator = true;
|
2009-03-20 12:37:21 +08:00
|
|
|
}
|
|
|
|
llvm::outs() << "\n";
|
2011-09-06 10:08:31 +08:00
|
|
|
llvm::outs() << "libraries: =" << ResourceDir;
|
2011-07-16 18:50:05 +08:00
|
|
|
|
2012-04-16 12:16:43 +08:00
|
|
|
StringRef sysroot = C.getSysRoot();
|
2011-07-16 18:50:05 +08:00
|
|
|
|
2015-06-24 06:43:50 +08:00
|
|
|
for (const std::string &Path : TC.getFilePaths()) {
|
|
|
|
// Always print a separator. ResourceDir was the first item shown.
|
2011-09-06 10:08:31 +08:00
|
|
|
llvm::outs() << ':';
|
2015-06-24 06:43:50 +08:00
|
|
|
// Interpretation of leading '=' is needed only for NetBSD.
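// For example (illustrative values), a file path of "=/usr/lib" with a
// sysroot of "/sdk" is printed as "/sdk/usr/lib".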
|
|
|
|
if (Path[0] == '=')
|
2015-06-24 23:10:30 +08:00
|
|
|
llvm::outs() << sysroot << Path.substr(1);
|
2011-07-16 18:50:05 +08:00
|
|
|
else
|
2015-06-24 06:43:50 +08:00
|
|
|
llvm::outs() << Path;
|
2009-03-20 12:37:21 +08:00
|
|
|
}
|
|
|
|
llvm::outs() << "\n";
|
2009-04-01 05:38:17 +08:00
|
|
|
return false;
|
2009-03-20 12:37:21 +08:00
|
|
|
}
|
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
// FIXME: The following handlers should use a callback mechanism, we don't
|
|
|
|
// know what the client would like to do.
|
2009-03-18 10:55:38 +08:00
|
|
|
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
|
2012-11-01 12:30:05 +08:00
|
|
|
llvm::outs() << GetFilePath(A->getValue(), TC) << "\n";
|
2009-03-13 08:51:18 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-03-18 10:55:38 +08:00
|
|
|
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_prog_name_EQ)) {
|
2012-11-01 12:30:05 +08:00
|
|
|
llvm::outs() << GetProgramPath(A->getValue(), TC) << "\n";
|
2009-03-13 08:51:18 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-03-18 10:55:38 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_print_libgcc_file_name)) {
|
2009-09-10 06:33:00 +08:00
|
|
|
llvm::outs() << GetFilePath("libgcc.a", TC) << "\n";
|
2009-03-13 08:51:18 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-06-17 07:25:22 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_print_multi_lib)) {
|
2015-07-01 03:32:57 +08:00
|
|
|
for (const Multilib &Multilib : TC.getMultilibs())
|
|
|
|
llvm::outs() << Multilib << "\n";
|
2009-06-17 07:25:22 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-02-12 11:21:20 +08:00
|
|
|
if (C.getArgs().hasArg(options::OPT_print_multi_directory)) {
|
2015-07-01 03:32:57 +08:00
|
|
|
for (const Multilib &Multilib : TC.getMultilibs()) {
|
2015-06-26 02:51:37 +08:00
|
|
|
if (Multilib.gccSuffix().empty())
|
2014-02-12 11:21:20 +08:00
|
|
|
llvm::outs() << ".\n";
|
|
|
|
else {
|
2015-06-26 02:51:37 +08:00
|
|
|
StringRef Suffix(Multilib.gccSuffix());
|
2014-02-12 11:21:20 +08:00
|
|
|
assert(Suffix.front() == '/');
|
|
|
|
llvm::outs() << Suffix.substr(1) << "\n";
|
|
|
|
}
|
2014-02-12 09:36:51 +08:00
|
|
|
}
|
2014-02-12 09:29:25 +08:00
|
|
|
return false;
|
|
|
|
}
|
2009-03-13 08:51:18 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-06-11 23:05:22 +08:00
|
|
|
// Display an action graph human-readably. Action A is the "sink" node
|
|
|
|
// and latest-occurring action. Traversal is in pre-order, visiting the
|
|
|
|
// inputs to each action before printing the action itself.
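// Each action is printed as "<id>: <class>, <inputs/details>, <output type>",
// e.g. (illustrative) '0: input, "foo.c", c'.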
|
2009-09-09 07:36:43 +08:00
|
|
|
static unsigned PrintActions1(const Compilation &C, Action *A,
|
2015-06-26 23:47:46 +08:00
|
|
|
std::map<Action *, unsigned> &Ids) {
|
2015-06-11 23:05:22 +08:00
|
|
|
if (Ids.count(A)) // A was already visited.
|
2009-03-13 20:19:02 +08:00
|
|
|
return Ids[A];
|
2009-09-09 07:36:43 +08:00
|
|
|
|
2009-03-13 20:19:02 +08:00
|
|
|
std::string str;
|
|
|
|
llvm::raw_string_ostream os(str);
|
2009-09-09 07:36:43 +08:00
|
|
|
|
2009-03-13 20:19:02 +08:00
|
|
|
os << Action::getClassName(A->getKind()) << ", ";
|
2009-09-09 07:36:43 +08:00
|
|
|
if (InputAction *IA = dyn_cast<InputAction>(A)) {
|
2012-11-01 12:30:05 +08:00
|
|
|
os << "\"" << IA->getInputArg().getValue() << "\"";
|
2009-03-13 20:19:02 +08:00
|
|
|
} else if (BindArchAction *BIA = dyn_cast<BindArchAction>(A)) {
|
2015-06-26 03:37:41 +08:00
|
|
|
os << '"' << BIA->getArchName() << '"' << ", {"
|
|
|
|
<< PrintActions1(C, *BIA->begin(), Ids) << "}";
|
2015-07-14 07:27:56 +08:00
|
|
|
} else if (CudaDeviceAction *CDA = dyn_cast<CudaDeviceAction>(A)) {
|
|
|
|
os << '"' << CDA->getGpuArchName() << '"' << ", {"
|
|
|
|
<< PrintActions1(C, *CDA->begin(), Ids) << "}";
|
2009-03-13 20:19:02 +08:00
|
|
|
} else {
|
2015-07-14 07:27:56 +08:00
|
|
|
ActionList *AL;
|
|
|
|
if (CudaHostAction *CHA = dyn_cast<CudaHostAction>(A)) {
|
|
|
|
os << "{" << PrintActions1(C, *CHA->begin(), Ids) << "}"
|
|
|
|
<< ", gpu binaries ";
|
|
|
|
AL = &CHA->getDeviceActions();
|
|
|
|
} else
|
|
|
|
AL = &A->getInputs();
|
|
|
|
|
2015-06-26 03:37:41 +08:00
|
|
|
const char *Prefix = "{";
|
2015-07-14 07:27:56 +08:00
|
|
|
for (Action *PreRequisite : *AL) {
|
2015-06-26 03:37:41 +08:00
|
|
|
os << Prefix << PrintActions1(C, PreRequisite, Ids);
|
|
|
|
Prefix = ", ";
|
2009-03-13 20:19:02 +08:00
|
|
|
}
|
|
|
|
os << "}";
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned Id = Ids.size();
|
|
|
|
Ids[A] = Id;
|
2009-09-09 07:36:43 +08:00
|
|
|
llvm::errs() << Id << ": " << os.str() << ", "
|
2009-03-13 20:19:02 +08:00
|
|
|
<< types::getTypeName(A->getType()) << "\n";
|
|
|
|
|
|
|
|
return Id;
|
|
|
|
}
|
|
|
|
|
2015-06-11 23:05:22 +08:00
|
|
|
// Print the action graphs in a compilation C.
|
|
|
|
// For example "clang -c file1.c file2.c" is composed of two subgraphs.
|
2009-03-18 11:13:20 +08:00
|
|
|
void Driver::PrintActions(const Compilation &C) const {
|
2015-06-26 03:37:41 +08:00
|
|
|
std::map<Action *, unsigned> Ids;
|
|
|
|
for (Action *A : C.getActions())
|
|
|
|
PrintActions1(C, A, Ids);
|
2009-03-12 15:58:46 +08:00
|
|
|
}
|
|
|
|
|
2011-05-06 22:05:11 +08:00
|
|
|
/// \brief Check whether the given input tree contains any compilation or
|
|
|
|
/// assembly actions.
|
|
|
|
static bool ContainsCompileOrAssembleAction(const Action *A) {
|
2015-06-26 23:47:46 +08:00
|
|
|
if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A) ||
|
2014-12-21 15:00:00 +08:00
|
|
|
isa<AssembleJobAction>(A))
|
2010-06-30 00:38:33 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
for (Action::const_iterator it = A->begin(), ie = A->end(); it != ie; ++it)
|
2011-05-06 22:05:11 +08:00
|
|
|
if (ContainsCompileOrAssembleAction(*it))
|
2010-06-30 00:38:33 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-06-26 23:47:46 +08:00
|
|
|
void Driver::BuildUniversalActions(const ToolChain &TC, DerivedArgList &Args,
|
2011-08-13 06:08:57 +08:00
|
|
|
const InputList &BAInputs,
|
2009-03-18 10:55:38 +08:00
|
|
|
ActionList &Actions) const {
|
2009-09-09 07:36:43 +08:00
|
|
|
llvm::PrettyStackTraceString CrashInfo("Building universal build actions");
|
|
|
|
// Collect the list of architectures. Duplicates are allowed, but should only
|
|
|
|
// be handled once (in the order seen).
|
2009-03-14 04:33:35 +08:00
|
|
|
llvm::StringSet<> ArchNames;
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<const char *, 4> Archs;
|
2014-12-30 03:01:36 +08:00
|
|
|
for (Arg *A : Args) {
|
2009-11-19 11:26:40 +08:00
|
|
|
if (A->getOption().matches(options::OPT_arch)) {
|
2009-09-09 07:37:30 +08:00
|
|
|
// Validate the option here; we don't save the type here because its
|
|
|
|
// particular spelling may participate in other driver choices.
|
|
|
|
llvm::Triple::ArchType Arch =
|
2015-06-26 23:47:46 +08:00
|
|
|
tools::darwin::getArchTypeForMachOArchName(A->getValue());
|
2009-09-09 07:37:30 +08:00
|
|
|
if (Arch == llvm::Triple::UnknownArch) {
|
2015-06-26 23:47:46 +08:00
|
|
|
Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
|
2009-09-09 07:37:30 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2009-03-19 15:55:12 +08:00
|
|
|
A->claim();
|
2014-11-19 10:56:13 +08:00
|
|
|
if (ArchNames.insert(A->getValue()).second)
|
2012-11-01 12:30:05 +08:00
|
|
|
Archs.push_back(A->getValue());
|
2009-03-13 02:40:18 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
// When there is no explicit arch for this platform, make sure we still bind
|
|
|
|
// the architecture (to the default) so that -Xarch_ is handled correctly.
|
2009-03-18 11:13:20 +08:00
|
|
|
if (!Archs.size())
|
2012-11-08 11:38:26 +08:00
|
|
|
Archs.push_back(Args.MakeArgString(TC.getDefaultUniversalArchName()));
|
2009-03-13 02:40:18 +08:00
|
|
|
|
|
|
|
ActionList SingleActions;
|
2011-08-13 06:08:57 +08:00
|
|
|
BuildActions(TC, Args, BAInputs, SingleActions);
|
2009-03-13 02:40:18 +08:00
|
|
|
|
2010-06-05 02:28:41 +08:00
|
|
|
// Add in arch bindings for every top level action, as well as lipo and
|
|
|
|
// dsymutil steps if needed.
|
2009-03-13 02:40:18 +08:00
|
|
|
for (unsigned i = 0, e = SingleActions.size(); i != e; ++i) {
|
|
|
|
Action *Act = SingleActions[i];
|
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
// Make sure we can lipo this kind of output. If not (and it is an actual
|
|
|
|
// output) then we disallow, since we can't create an output file with the
|
|
|
|
// right name without overwriting it. We could remove this oddity by just
|
|
|
|
// changing the output names to include the arch, which would also fix
|
2009-03-13 02:40:18 +08:00
|
|
|
// -save-temps. Compatibility wins for now.
|
|
|
|
|
2009-03-14 01:46:02 +08:00
|
|
|
if (Archs.size() > 1 && !types::canLipoType(Act->getType()))
|
2009-03-13 02:40:18 +08:00
|
|
|
Diag(clang::diag::err_drv_invalid_output_with_multiple_archs)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< types::getTypeName(Act->getType());
|
2009-03-13 02:40:18 +08:00
|
|
|
|
|
|
|
ActionList Inputs;
|
2010-03-12 02:04:58 +08:00
|
|
|
for (unsigned i = 0, e = Archs.size(); i != e; ++i) {
|
2014-08-29 15:25:23 +08:00
|
|
|
Inputs.push_back(
|
|
|
|
new BindArchAction(std::unique_ptr<Action>(Act), Archs[i]));
|
2010-03-12 02:04:58 +08:00
|
|
|
if (i != 0)
|
|
|
|
Inputs.back()->setOwnsInputs(false);
|
|
|
|
}
|
2009-03-13 02:40:18 +08:00
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
// Lipo if necessary; we do it this way because we need to set the arch flag
|
|
|
|
// so that -Xarch_ gets overwritten.
|
2009-03-13 02:40:18 +08:00
|
|
|
if (Inputs.size() == 1 || Act->getType() == types::TY_Nothing)
|
|
|
|
Actions.append(Inputs.begin(), Inputs.end());
|
|
|
|
else
|
|
|
|
Actions.push_back(new LipoJobAction(Inputs, Act->getType()));
|
2010-06-05 02:28:41 +08:00
|
|
|
|
2012-02-07 03:43:51 +08:00
|
|
|
// Handle debug info queries.
|
|
|
|
Arg *A = Args.getLastArg(options::OPT_g_Group);
|
2012-04-16 05:22:10 +08:00
|
|
|
if (A && !A->getOption().matches(options::OPT_g0) &&
|
|
|
|
!A->getOption().matches(options::OPT_gstabs) &&
|
|
|
|
ContainsCompileOrAssembleAction(Actions.back())) {
|
2012-07-10 01:31:28 +08:00
|
|
|
|
2012-04-16 05:22:10 +08:00
|
|
|
// Add a 'dsymutil' step if necessary, when debug info is enabled and we
|
|
|
|
// have a compile input. We need to run 'dsymutil' ourselves in such cases
|
2013-01-29 01:39:03 +08:00
|
|
|
// because the debug info will refer to a temporary object file which
|
2012-04-16 05:22:10 +08:00
|
|
|
// will be removed at the end of the compilation process.
|
|
|
|
if (Act->getType() == types::TY_Image) {
|
|
|
|
ActionList Inputs;
|
|
|
|
Inputs.push_back(Actions.back());
|
|
|
|
Actions.pop_back();
|
|
|
|
Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM));
|
2010-06-05 02:28:41 +08:00
|
|
|
}
|
2012-04-16 05:22:10 +08:00
|
|
|
|
2014-02-07 02:53:25 +08:00
|
|
|
// Verify the debug info output.
|
2014-01-17 10:06:23 +08:00
|
|
|
if (Args.hasArg(options::OPT_verify_debug_info)) {
|
2014-08-29 15:25:23 +08:00
|
|
|
std::unique_ptr<Action> VerifyInput(Actions.back());
|
2012-04-16 05:22:10 +08:00
|
|
|
Actions.pop_back();
|
2014-08-29 15:25:23 +08:00
|
|
|
Actions.push_back(new VerifyDebugInfoJobAction(std::move(VerifyInput),
|
2014-02-07 02:53:25 +08:00
|
|
|
types::TY_Nothing));
|
2012-04-16 05:22:10 +08:00
|
|
|
}
|
|
|
|
}
|
2009-03-13 02:40:18 +08:00
|
|
|
}
|
2009-03-12 15:58:46 +08:00
|
|
|
}
|
|
|
|
|
2013-08-06 08:20:31 +08:00
|
|
|
/// \brief Check that the file referenced by Value exists. If it doesn't,
|
|
|
|
/// issue a diagnostic and return false.
|
2013-12-03 14:53:35 +08:00
|
|
|
static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
|
2013-08-06 08:20:31 +08:00
|
|
|
StringRef Value) {
|
|
|
|
if (!D.getCheckInputsExist())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// stdin always exists.
|
|
|
|
if (Value == "-")
|
|
|
|
return true;
|
|
|
|
|
|
|
|
SmallString<64> Path(Value);
|
|
|
|
if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) {
|
2015-03-18 18:17:07 +08:00
|
|
|
if (!llvm::sys::path::is_absolute(Path)) {
|
2013-08-06 08:20:31 +08:00
|
|
|
SmallString<64> Directory(WorkDir->getValue());
|
|
|
|
llvm::sys::path::append(Directory, Value);
|
|
|
|
Path.assign(Directory);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (llvm::sys::fs::exists(Twine(Path)))
|
|
|
|
return true;
|
|
|
|
|
2015-04-25 06:16:53 +08:00
|
|
|
if (D.IsCLMode() && !llvm::sys::path::is_absolute(Twine(Path)) &&
|
|
|
|
llvm::sys::Process::FindInEnvPath("LIB", Value))
|
2014-06-19 01:21:50 +08:00
|
|
|
return true;
|
|
|
|
|
2015-03-18 18:17:07 +08:00
|
|
|
D.Diag(clang::diag::err_drv_no_such_file) << Path;
|
2013-08-06 08:20:31 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-08-13 06:08:57 +08:00
|
|
|
// Construct the list of inputs and their types.
|
2014-05-03 06:55:30 +08:00
|
|
|
void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
|
2011-08-13 06:08:57 +08:00
|
|
|
InputList &Inputs) const {
|
2009-09-09 07:36:43 +08:00
|
|
|
// Track the current user specified (-x) input. We also explicitly track the
|
|
|
|
// argument used to set the type; we only want to claim the type when we
|
|
|
|
// actually use it, so we warn about unused -x arguments.
|
2009-03-14 01:57:10 +08:00
|
|
|
types::ID InputType = types::TY_Nothing;
|
2014-05-18 00:56:41 +08:00
|
|
|
Arg *InputTypeArg = nullptr;
|
2009-03-14 01:57:10 +08:00
|
|
|
|
2013-08-13 02:34:17 +08:00
|
|
|
// The last /TC or /TP option sets the input type to C or C++ globally.
|
2014-09-13 05:44:24 +08:00
|
|
|
if (Arg *TCTP = Args.getLastArgNoClaim(options::OPT__SLASH_TC,
|
|
|
|
options::OPT__SLASH_TP)) {
|
2013-08-06 08:20:31 +08:00
|
|
|
InputTypeArg = TCTP;
|
2013-08-13 02:34:17 +08:00
|
|
|
InputType = TCTP->getOption().matches(options::OPT__SLASH_TC)
|
2015-06-26 23:47:46 +08:00
|
|
|
? types::TY_C
|
|
|
|
: types::TY_CXX;
|
2013-08-13 02:34:17 +08:00
|
|
|
|
2015-06-26 23:47:46 +08:00
|
|
|
arg_iterator it =
|
|
|
|
Args.filtered_begin(options::OPT__SLASH_TC, options::OPT__SLASH_TP);
|
2013-08-13 02:34:17 +08:00
|
|
|
const arg_iterator ie = Args.filtered_end();
|
|
|
|
Arg *Previous = *it++;
|
|
|
|
bool ShowNote = false;
|
|
|
|
while (it != ie) {
|
2013-09-12 00:38:41 +08:00
|
|
|
Diag(clang::diag::warn_drv_overriding_flag_option)
|
|
|
|
<< Previous->getSpelling() << (*it)->getSpelling();
|
2013-08-13 02:34:17 +08:00
|
|
|
Previous = *it++;
|
|
|
|
ShowNote = true;
|
2013-08-06 08:20:31 +08:00
|
|
|
}
|
2013-08-13 02:34:17 +08:00
|
|
|
if (ShowNote)
|
|
|
|
Diag(clang::diag::note_drv_t_option_is_global);
|
2013-08-06 08:20:31 +08:00
|
|
|
|
|
|
|
// No driver mode exposes -x and /TC or /TP; we don't support mixing them.
|
|
|
|
assert(!Args.hasArg(options::OPT_x) && "-x and /TC or /TP is not allowed");
|
|
|
|
}
|
|
|
|
|
2014-12-30 03:01:36 +08:00
|
|
|
for (Arg *A : Args) {
|
2012-08-21 05:41:17 +08:00
|
|
|
if (A->getOption().getKind() == Option::InputClass) {
|
2012-11-01 12:30:05 +08:00
|
|
|
const char *Value = A->getValue();
|
2009-03-12 15:58:46 +08:00
|
|
|
types::ID Ty = types::TY_INVALID;
|
|
|
|
|
|
|
|
// Infer the input type if necessary.
|
2009-03-14 01:57:10 +08:00
|
|
|
if (InputType == types::TY_Nothing) {
|
|
|
|
// If there was an explicit arg for this, claim it.
|
|
|
|
if (InputTypeArg)
|
|
|
|
InputTypeArg->claim();
|
|
|
|
|
2009-03-12 15:58:46 +08:00
|
|
|
// stdin must be handled specially.
|
|
|
|
if (memcmp(Value, "-", 2) == 0) {
|
2009-09-09 07:36:43 +08:00
|
|
|
// If running with -E, treat as a C input (this changes the builtin
|
|
|
|
// macros, for example). This may be overridden by -ObjC below.
|
2009-03-12 15:58:46 +08:00
|
|
|
//
|
2009-09-09 07:36:43 +08:00
|
|
|
// Otherwise emit an error but still use a valid type to avoid
|
|
|
|
// spurious errors (e.g., no inputs).
|
2013-07-19 04:29:38 +08:00
|
|
|
if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP())
|
2014-01-29 09:04:40 +08:00
|
|
|
Diag(IsCLMode() ? clang::diag::err_drv_unknown_stdin_type_clang_cl
|
|
|
|
: clang::diag::err_drv_unknown_stdin_type);
|
2009-03-12 15:58:46 +08:00
|
|
|
Ty = types::TY_C;
|
|
|
|
} else {
|
2011-03-17 06:45:02 +08:00
|
|
|
// Otherwise lookup by extension.
|
|
|
|
// Fall back to C if invoked as a C preprocessor, or to Object otherwise.
|
|
|
|
// We use a host hook here because Darwin at least has its own
|
2009-09-09 07:36:43 +08:00
|
|
|
// idea of what .s is.
|
2009-03-12 15:58:46 +08:00
|
|
|
if (const char *Ext = strrchr(Value, '.'))
|
2010-08-02 13:43:56 +08:00
|
|
|
Ty = TC.LookupTypeForExtension(Ext + 1);
|
2009-03-21 07:39:23 +08:00
|
|
|
|
2011-03-17 06:45:02 +08:00
|
|
|
if (Ty == types::TY_INVALID) {
|
2013-07-19 04:29:38 +08:00
|
|
|
if (CCCIsCPP())
|
2011-03-17 06:45:02 +08:00
|
|
|
Ty = types::TY_C;
|
|
|
|
else
|
|
|
|
Ty = types::TY_Object;
|
|
|
|
}
|
2010-02-18 04:32:58 +08:00
|
|
|
|
|
|
|
// If the driver is invoked as C++ compiler (like clang++ or c++) it
|
|
|
|
// should autodetect some input files as C++ for g++ compatibility.
|
2013-07-19 04:29:38 +08:00
|
|
|
if (CCCIsCXX()) {
|
2010-02-18 04:32:58 +08:00
|
|
|
types::ID OldTy = Ty;
|
|
|
|
Ty = types::lookupCXXTypeForCType(Ty);
|
|
|
|
|
|
|
|
if (Ty != OldTy)
|
|
|
|
Diag(clang::diag::warn_drv_treating_input_as_cxx)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< getTypeName(OldTy) << getTypeName(Ty);
|
2010-02-18 04:32:58 +08:00
|
|
|
}
|
2009-03-12 15:58:46 +08:00
|
|
|
}
|
|
|
|
|
2009-05-19 05:47:54 +08:00
|
|
|
// -ObjC and -ObjC++ override the default language, but only for "source
|
|
|
|
// files". We just treat everything that isn't a linker input as a
|
|
|
|
// source file.
|
2009-09-09 07:36:43 +08:00
|
|
|
//
|
2009-05-19 05:47:54 +08:00
|
|
|
// FIXME: Clean this up if we move the phase sequence into the type.
|
2009-03-12 15:58:46 +08:00
|
|
|
if (Ty != types::TY_Object) {
|
|
|
|
if (Args.hasArg(options::OPT_ObjC))
|
|
|
|
Ty = types::TY_ObjC;
|
|
|
|
else if (Args.hasArg(options::OPT_ObjCXX))
|
|
|
|
Ty = types::TY_ObjCXX;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
assert(InputTypeArg && "InputType set w/o InputTypeArg");
|
2014-09-13 02:15:10 +08:00
|
|
|
if (!InputTypeArg->getOption().matches(options::OPT_x)) {
|
|
|
|
// If emulating cl.exe, make sure that /TC and /TP don't affect input
|
|
|
|
// object files.
|
|
|
|
const char *Ext = strrchr(Value, '.');
|
|
|
|
if (Ext && TC.LookupTypeForExtension(Ext + 1) == types::TY_Object)
|
|
|
|
Ty = types::TY_Object;
|
|
|
|
}
|
|
|
|
if (Ty == types::TY_INVALID) {
|
|
|
|
Ty = InputType;
|
|
|
|
InputTypeArg->claim();
|
|
|
|
}
|
2009-03-12 15:58:46 +08:00
|
|
|
}
|
|
|
|
|
2013-12-03 14:53:35 +08:00
|
|
|
if (DiagnoseInputExistence(*this, Args, Value))
|
2009-03-12 15:58:46 +08:00
|
|
|
Inputs.push_back(std::make_pair(Ty, A));
|
|
|
|
|
2013-08-06 08:20:31 +08:00
|
|
|
} else if (A->getOption().matches(options::OPT__SLASH_Tc)) {
|
|
|
|
StringRef Value = A->getValue();
|
2013-12-03 14:53:35 +08:00
|
|
|
if (DiagnoseInputExistence(*this, Args, Value)) {
|
2013-08-06 08:20:31 +08:00
|
|
|
Arg *InputArg = MakeInputArg(Args, Opts, A->getValue());
|
|
|
|
Inputs.push_back(std::make_pair(types::TY_C, InputArg));
|
|
|
|
}
|
|
|
|
A->claim();
|
|
|
|
} else if (A->getOption().matches(options::OPT__SLASH_Tp)) {
|
|
|
|
StringRef Value = A->getValue();
|
2013-12-03 14:53:35 +08:00
|
|
|
if (DiagnoseInputExistence(*this, Args, Value)) {
|
2013-08-06 08:20:31 +08:00
|
|
|
Arg *InputArg = MakeInputArg(Args, Opts, A->getValue());
|
|
|
|
Inputs.push_back(std::make_pair(types::TY_CXX, InputArg));
|
|
|
|
}
|
|
|
|
A->claim();
|
2012-10-20 06:37:06 +08:00
|
|
|
} else if (A->getOption().hasFlag(options::LinkerInput)) {
|
2009-09-09 07:36:43 +08:00
|
|
|
// Just treat as object type; we could make a special type for this if
|
|
|
|
// necessary.
|
2009-03-12 15:58:46 +08:00
|
|
|
Inputs.push_back(std::make_pair(types::TY_Object, A));
|
|
|
|
|
2009-11-19 11:26:40 +08:00
|
|
|
} else if (A->getOption().matches(options::OPT_x)) {
|
2009-09-09 07:36:43 +08:00
|
|
|
InputTypeArg = A;
|
2012-11-01 12:30:05 +08:00
|
|
|
InputType = types::lookupTypeForTypeSpecifier(A->getValue());
|
2012-04-07 08:01:31 +08:00
|
|
|
A->claim();
|
2009-03-12 15:58:46 +08:00
|
|
|
|
|
|
|
// Follow gcc behavior and treat as linker input for invalid -x
|
2009-09-09 07:36:43 +08:00
|
|
|
// options. It's not clear why we shouldn't just revert to unknown; but
|
2010-12-18 05:22:33 +08:00
|
|
|
// this isn't very important, we might as well be bug compatible.
|
2009-03-12 15:58:46 +08:00
|
|
|
if (!InputType) {
|
2012-11-01 12:30:05 +08:00
|
|
|
Diag(clang::diag::err_drv_unknown_language) << A->getValue();
|
2009-03-12 15:58:46 +08:00
|
|
|
InputType = types::TY_Object;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-07-19 04:29:38 +08:00
|
|
|
if (CCCIsCPP() && Inputs.empty()) {
|
2011-03-07 07:31:01 +08:00
|
|
|
// If called as a standalone preprocessor, stdin is processed
|
|
|
|
// if no other input is present.
|
2013-08-06 08:20:31 +08:00
|
|
|
Arg *A = MakeInputArg(Args, Opts, "-");
|
2011-03-07 07:31:01 +08:00
|
|
|
Inputs.push_back(std::make_pair(types::TY_C, A));
|
|
|
|
}
|
2011-08-13 06:08:57 +08:00
|
|
|
}
|
|
|
|
|
2015-07-14 07:27:56 +08:00
|
|
|
// For each unique --cuda-gpu-arch= argument, this creates a TY_CUDA_DEVICE input
|
|
|
|
// action and wraps it in a CudaDeviceAction paired with the appropriate GPU
|
|
|
|
// arch name. If we're only building device-side code, each action remains
|
|
|
|
// independent. Otherwise we pass device-side actions as inputs to a new
|
|
|
|
// CudaHostAction which combines both host and device side actions.
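// Roughly (illustrative), a full compilation of foo.cu yields
//   CudaHostAction(host action, {CudaDeviceAction(sm_20), ...})
// while device-only compilation leaves each CudaDeviceAction at the top level.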
|
|
|
|
static std::unique_ptr<Action>
|
|
|
|
buildCudaActions(const Driver &D, const ToolChain &TC, DerivedArgList &Args,
|
2015-07-21 05:59:31 +08:00
|
|
|
const Arg *InputArg, std::unique_ptr<Action> HostAction,
|
|
|
|
ActionList &Actions) {
|
2015-07-14 07:27:56 +08:00
|
|
|
|
|
|
|
// Collect all cuda_gpu_arch parameters, removing duplicates.
|
|
|
|
SmallVector<const char *, 4> GpuArchList;
|
|
|
|
llvm::StringSet<> GpuArchNames;
|
|
|
|
for (Arg *A : Args) {
|
|
|
|
if (A->getOption().matches(options::OPT_cuda_gpu_arch_EQ)) {
|
|
|
|
A->claim();
|
|
|
|
if (GpuArchNames.insert(A->getValue()).second)
|
|
|
|
GpuArchList.push_back(A->getValue());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Default to sm_20, which is the lowest common denominator for supported GPUs.
|
|
|
|
// sm_20 code should work correctly, if suboptimally, on all newer GPUs.
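// (sm_20 corresponds to compute capability 2.0, i.e. Fermi-class GPUs.)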
|
|
|
|
if (GpuArchList.empty())
|
|
|
|
GpuArchList.push_back("sm_20");
|
|
|
|
|
|
|
|
// Replicate inputs for each GPU architecture.
|
|
|
|
Driver::InputList CudaDeviceInputs;
|
2015-07-21 04:02:54 +08:00
|
|
|
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
|
2015-07-14 07:27:56 +08:00
|
|
|
CudaDeviceInputs.push_back(std::make_pair(types::TY_CUDA_DEVICE, InputArg));
|
|
|
|
|
|
|
|
// Build actions for all device inputs.
|
|
|
|
ActionList CudaDeviceActions;
|
|
|
|
D.BuildActions(TC, Args, CudaDeviceInputs, CudaDeviceActions);
|
|
|
|
assert(GpuArchList.size() == CudaDeviceActions.size() &&
|
|
|
|
"Failed to create actions for all devices");
|
|
|
|
|
|
|
|
// Check whether any of the device actions stopped before they could generate PTX.
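// (This happens, for example, when -E or -fsyntax-only stops the pipeline
// before the backend phase.)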
|
|
|
|
bool PartialCompilation = false;
|
2015-07-21 04:02:54 +08:00
|
|
|
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
|
|
|
|
if (CudaDeviceActions[I]->getKind() != Action::BackendJobClass) {
|
2015-07-14 07:27:56 +08:00
|
|
|
PartialCompilation = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-21 05:59:31 +08:00
|
|
|
// Figure out which NVPTX triple to use for device-side compilation based on
|
|
|
|
// whether host is 64-bit.
|
|
|
|
const char *DeviceTriple = TC.getTriple().isArch64Bit()
|
|
|
|
? "nvptx64-nvidia-cuda"
|
|
|
|
: "nvptx-nvidia-cuda";
|
|
|
|
|
2015-07-14 07:27:56 +08:00
|
|
|
// Figure out what to do with device actions -- pass them as inputs to the
|
|
|
|
// host action or run each of them independently.
|
2015-07-21 04:02:54 +08:00
|
|
|
bool DeviceOnlyCompilation = Args.hasArg(options::OPT_cuda_device_only);
|
2015-07-14 07:27:56 +08:00
|
|
|
if (PartialCompilation || DeviceOnlyCompilation) {
|
|
|
|
// In case of partial or device-only compilation, results of device actions
|
|
|
|
// are not consumed by the host action, so device actions have to be added to
|
|
|
|
// the top-level actions list with AtTopLevel=true and run independently.
|
|
|
|
|
|
|
|
// -o is ambiguous if we have more than one top-level action.
|
|
|
|
if (Args.hasArg(options::OPT_o) &&
|
|
|
|
(!DeviceOnlyCompilation || GpuArchList.size() > 1)) {
|
|
|
|
D.Diag(clang::diag::err_drv_output_argument_with_multiple_files);
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2015-07-21 04:02:54 +08:00
|
|
|
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
|
2015-07-21 05:59:31 +08:00
|
|
|
Actions.push_back(new CudaDeviceAction(
|
|
|
|
std::unique_ptr<Action>(CudaDeviceActions[I]), GpuArchList[I],
|
|
|
|
DeviceTriple, /* AtTopLevel */ true));
|
2015-07-14 07:27:56 +08:00
|
|
|
// Kill host action in case of device-only compilation.
|
|
|
|
if (DeviceOnlyCompilation)
|
2015-07-21 05:59:31 +08:00
|
|
|
HostAction.reset(nullptr);
|
|
|
|
return HostAction;
|
2015-07-14 07:27:56 +08:00
|
|
|
}
|
2015-07-21 04:02:54 +08:00
|
|
|
|
|
|
|
// Outputs of device actions during complete CUDA compilation get created
|
|
|
|
// with AtTopLevel=false and become inputs for the host action.
|
|
|
|
ActionList DeviceActions;
|
|
|
|
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
|
2015-07-21 05:59:31 +08:00
|
|
|
DeviceActions.push_back(new CudaDeviceAction(
|
|
|
|
std::unique_ptr<Action>(CudaDeviceActions[I]), GpuArchList[I],
|
|
|
|
DeviceTriple, /* AtTopLevel */ false));
|
2015-07-21 04:02:54 +08:00
|
|
|
// Return a new host action that incorporates original host action and all
|
|
|
|
// device actions.
|
|
|
|
return std::unique_ptr<Action>(
|
2015-07-21 05:59:31 +08:00
|
|
|
new CudaHostAction(std::move(HostAction), DeviceActions));
|
2015-07-14 07:27:56 +08:00
|
|
|
}
|
|
|
|
|
2013-08-13 07:26:25 +08:00
|
|
|
void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
|
2011-08-13 06:08:57 +08:00
|
|
|
const InputList &Inputs, ActionList &Actions) const {
|
|
|
|
llvm::PrettyStackTraceString CrashInfo("Building compilation actions");
|
2011-03-07 07:31:01 +08:00
|
|
|
|
2009-03-13 08:17:48 +08:00
|
|
|
if (!SuppressMissingInputWarning && Inputs.empty()) {
|
2009-03-13 07:55:14 +08:00
|
|
|
Diag(clang::diag::err_drv_no_input_files);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-07-28 07:36:45 +08:00
|
|
|
Arg *FinalPhaseArg;
|
|
|
|
phases::ID FinalPhase = getFinalPhase(Args, &FinalPhaseArg);
|
2009-03-13 07:55:14 +08:00
|
|
|
|
2013-08-25 22:27:09 +08:00
|
|
|
if (FinalPhase == phases::Link && Args.hasArg(options::OPT_emit_llvm)) {
|
|
|
|
Diag(clang::diag::err_drv_emit_llvm_link);
|
|
|
|
}
|
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
// Reject -Z* at the top level; these options should never have been exposed
|
|
|
|
// by gcc.
|
2009-03-27 00:12:09 +08:00
|
|
|
if (Arg *A = Args.getLastArg(options::OPT_Z_Joined))
|
2009-03-20 14:14:23 +08:00
|
|
|
Diag(clang::diag::err_drv_use_of_Z_option) << A->getAsString(Args);
|
2009-03-13 07:55:14 +08:00
|
|
|
|
2013-08-13 07:26:25 +08:00
|
|
|
// Diagnose misuse of /Fo.
|
|
|
|
if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fo)) {
|
|
|
|
StringRef V = A->getValue();
|
2014-11-18 03:16:36 +08:00
|
|
|
if (Inputs.size() > 1 && !V.empty() &&
|
|
|
|
!llvm::sys::path::is_separator(V.back())) {
|
2013-08-13 07:26:25 +08:00
|
|
|
// Check whether /Fo tries to name an output file for multiple inputs.
|
2013-10-19 06:49:04 +08:00
|
|
|
Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< A->getSpelling() << V;
|
2013-08-13 07:26:25 +08:00
|
|
|
Args.eraseArg(options::OPT__SLASH_Fo);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-19 06:49:04 +08:00
|
|
|
// Diagnose misuse of /Fa.
|
|
|
|
if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fa)) {
|
|
|
|
StringRef V = A->getValue();
|
2014-11-18 03:16:36 +08:00
|
|
|
if (Inputs.size() > 1 && !V.empty() &&
|
|
|
|
!llvm::sys::path::is_separator(V.back())) {
|
2013-10-19 06:49:04 +08:00
|
|
|
// Check whether /Fa tries to name an asm file for multiple inputs.
|
|
|
|
Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< A->getSpelling() << V;
|
2013-10-19 06:49:04 +08:00
|
|
|
Args.eraseArg(options::OPT__SLASH_Fa);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-12 02:16:21 +08:00
|
|
|
// Diagnose misuse of /o.
|
|
|
|
if (Arg *A = Args.getLastArg(options::OPT__SLASH_o)) {
|
|
|
|
if (A->getValue()[0] == '\0') {
|
|
|
|
// It has to have a value.
|
|
|
|
Diag(clang::diag::err_drv_missing_argument) << A->getSpelling() << 1;
|
|
|
|
Args.eraseArg(options::OPT__SLASH_o);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-03-13 19:38:42 +08:00
|
|
|
// Construct the actions to perform.
|
|
|
|
ActionList LinkerInputs;
|
2013-11-27 13:22:15 +08:00
|
|
|
|
2013-03-07 20:32:26 +08:00
|
|
|
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
|
2015-07-21 04:02:54 +08:00
|
|
|
for (auto &I : Inputs) {
|
|
|
|
types::ID InputType = I.first;
|
|
|
|
const Arg *InputArg = I.second;
|
2009-03-13 19:38:42 +08:00
|
|
|
|
2013-03-07 20:32:26 +08:00
|
|
|
PL.clear();
|
|
|
|
types::getCompilationPhases(InputType, PL);
|
2009-03-13 19:38:42 +08:00
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
// If the first step comes after the final phase we are doing as part of
|
|
|
|
// this compilation, warn the user about it.
|
2013-03-07 20:32:26 +08:00
|
|
|
phases::ID InitialPhase = PL[0];
|
2009-03-13 19:38:42 +08:00
|
|
|
if (InitialPhase > FinalPhase) {
|
2009-03-19 15:57:08 +08:00
|
|
|
// Claim here to avoid the more general unused warning.
|
|
|
|
InputArg->claim();
|
2009-09-17 12:13:26 +08:00
|
|
|
|
2011-04-20 23:44:48 +08:00
|
|
|
// Suppress all unused style warnings with -Qunused-arguments
|
|
|
|
if (Args.hasArg(options::OPT_Qunused_arguments))
|
|
|
|
continue;
|
|
|
|
|
2012-08-06 12:09:06 +08:00
|
|
|
// Special case when the final phase is determined by binary name, rather than
|
|
|
|
// by a command-line argument with a corresponding Arg.
|
2013-07-19 04:29:38 +08:00
|
|
|
if (CCCIsCPP())
|
2012-08-06 12:09:06 +08:00
|
|
|
Diag(clang::diag::warn_drv_input_file_unused_by_cpp)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< InputArg->getAsString(Args) << getPhaseName(InitialPhase);
|
2009-09-17 12:13:26 +08:00
|
|
|
// Special case '-E' warning on a previously preprocessed file to make
|
|
|
|
// more sense.
|
2012-08-06 12:09:06 +08:00
|
|
|
else if (InitialPhase == phases::Compile &&
|
|
|
|
FinalPhase == phases::Preprocess &&
|
|
|
|
getPreprocessedType(InputType) == types::TY_INVALID)
|
2009-09-17 12:13:26 +08:00
|
|
|
Diag(clang::diag::warn_drv_preprocessed_input_file_unused)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< InputArg->getAsString(Args) << !!FinalPhaseArg
|
|
|
|
<< (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
|
2009-09-17 12:13:26 +08:00
|
|
|
else
|
|
|
|
Diag(clang::diag::warn_drv_input_file_unused)
|
2015-06-26 23:47:46 +08:00
|
|
|
<< InputArg->getAsString(Args) << getPhaseName(InitialPhase)
|
|
|
|
<< !!FinalPhaseArg
|
|
|
|
<< (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
|
2009-03-13 19:38:42 +08:00
|
|
|
continue;
|
|
|
|
}
|
2009-09-09 07:36:43 +08:00
|
|
|
|
2015-07-14 07:27:56 +08:00
|
|
|
phases::ID CudaInjectionPhase;
|
|
|
|
if (isSaveTempsEnabled()) {
|
|
|
|
// All phases are done independently; inject GPU blobs during the compilation
|
|
|
|
// phase, as that's where we generate glue code to init them.
|
|
|
|
CudaInjectionPhase = phases::Compile;
|
|
|
|
} else {
|
|
|
|
// Assumes that clang does everything up until the linking phase, so we inject
|
|
|
|
// CUDA device actions at the last step before linking. Otherwise the CUDA
|
|
|
|
// host action forces the preprocessor into a separate invocation.
|
2015-07-21 04:02:54 +08:00
|
|
|
CudaInjectionPhase = FinalPhase;
|
|
|
|
if (FinalPhase == phases::Link)
|
|
|
|
for (auto PI = PL.begin(), PE = PL.end(); PI != PE; ++PI) {
|
|
|
|
auto next = PI + 1;
|
|
|
|
if (next != PE && *next == phases::Link)
|
|
|
|
CudaInjectionPhase = *PI;
|
2015-07-14 07:27:56 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-03-13 19:38:42 +08:00
|
|
|
// Build the pipeline for this file.
|
2014-03-08 04:03:18 +08:00
|
|
|
std::unique_ptr<Action> Current(new InputAction(*InputArg, InputType));
|
2015-06-26 23:47:46 +08:00
|
|
|
for (SmallVectorImpl<phases::ID>::iterator i = PL.begin(), e = PL.end();
|
|
|
|
i != e; ++i) {
|
2013-03-07 20:32:26 +08:00
|
|
|
phases::ID Phase = *i;
|
2009-03-13 19:38:42 +08:00
|
|
|
|
|
|
|
// We are done if this step is past what the user requested.
|
|
|
|
if (Phase > FinalPhase)
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Queue linker inputs.
|
|
|
|
if (Phase == phases::Link) {
|
2013-03-07 20:32:26 +08:00
|
|
|
assert((i + 1) == e && "linking must be final compilation step.");
|
2014-03-08 03:33:25 +08:00
|
|
|
LinkerInputs.push_back(Current.release());
|
2009-03-13 19:38:42 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-09-09 07:36:43 +08:00
|
|
|
// Some types skip the assembler phase (e.g., llvm-bc), but we can't
|
|
|
|
// encode this in the steps because the intermediate type depends on
|
|
|
|
// arguments. Just special case here.
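// For example, with -emit-llvm the input to this phase is LLVM bitcode, so
// there is nothing for the assembler to do.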
|
2009-03-25 04:17:30 +08:00
|
|
|
if (Phase == phases::Assemble && Current->getType() != types::TY_PP_Asm)
|
|
|
|
continue;
|
|
|
|
|
2009-03-13 19:38:42 +08:00
|
|
|
// Otherwise construct the appropriate action.
|
2015-02-21 04:30:56 +08:00
|
|
|
Current = ConstructPhaseAction(TC, Args, Phase, std::move(Current));
|
2015-07-14 07:27:56 +08:00
|
|
|
|
|
|
|
if (InputType == types::TY_CUDA && Phase == CudaInjectionPhase &&
|
|
|
|
!Args.hasArg(options::OPT_cuda_host_only)) {
|
2015-07-21 05:59:31 +08:00
|
|
|
Current = buildCudaActions(*this, TC, Args, InputArg,
|
2015-07-14 07:27:56 +08:00
|
|
|
std::move(Current), Actions);
|
|
|
|
if (!Current)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-03-13 19:38:42 +08:00
|
|
|
if (Current->getType() == types::TY_Nothing)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we ended with something, add to the output list.
|
|
|
|
if (Current)
|
2014-03-08 03:33:25 +08:00
|
|
|
Actions.push_back(Current.release());
|
2009-03-12 15:58:46 +08:00
|
|
|
}
|
2009-03-13 19:38:42 +08:00
|
|
|
|
|
|
|
// Add a link action if necessary.
|
|
|
|
if (!LinkerInputs.empty())
|
|
|
|
Actions.push_back(new LinkJobAction(LinkerInputs, types::TY_Image));
|
2009-12-23 07:19:32 +08:00
|
|
|
|
|
|
|
// If we are linking, claim any options which are obviously only used for
|
|
|
|
// compilation.
|
2013-09-17 08:03:41 +08:00
|
|
|
if (FinalPhase == phases::Link && PL.size() == 1) {
|
2009-12-23 07:19:32 +08:00
|
|
|
Args.ClaimAllArgs(options::OPT_CompileOnly_Group);
|
2013-09-17 08:03:41 +08:00
|
|
|
Args.ClaimAllArgs(options::OPT_cl_compile_Group);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Claim ignored clang-cl options.
|
|
|
|
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
|
2009-03-13 19:38:42 +08:00
|
|
|
}
|
|
|
|
|
2014-08-29 15:25:23 +08:00
|
|
|
std::unique_ptr<Action>
|
2015-02-21 04:30:56 +08:00
|
|
|
Driver::ConstructPhaseAction(const ToolChain &TC, const ArgList &Args,
|
|
|
|
phases::ID Phase,
|
2014-08-29 15:25:23 +08:00
|
|
|
std::unique_ptr<Action> Input) const {
|
2009-03-18 09:38:48 +08:00
|
|
|
llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
|
2009-03-13 19:38:42 +08:00
|
|
|
// Build the appropriate action.
|
|
|
|
switch (Phase) {
|
2015-06-26 23:47:46 +08:00
|
|
|
case phases::Link:
|
|
|
|
llvm_unreachable("link action invalid here.");
|
2009-03-13 19:38:42 +08:00
|
|
|
case phases::Preprocess: {
|
2009-03-30 14:36:42 +08:00
|
|
|
types::ID OutputTy;
|
|
|
|
// -{M, MM} alter the output type.
|
2010-12-09 05:33:40 +08:00
|
|
|
if (Args.hasArg(options::OPT_M, options::OPT_MM)) {
|
2009-03-30 14:36:42 +08:00
|
|
|
OutputTy = types::TY_Dependencies;
|
|
|
|
} else {
|
2012-06-30 06:03:56 +08:00
|
|
|
OutputTy = Input->getType();
|
|
|
|
if (!Args.hasFlag(options::OPT_frewrite_includes,
|
2014-06-24 16:01:01 +08:00
|
|
|
options::OPT_fno_rewrite_includes, false) &&
|
|
|
|
!CCGenDiagnostics)
|
2012-06-30 06:03:56 +08:00
|
|
|
OutputTy = types::getPreprocessedType(OutputTy);
|
2009-03-30 14:36:42 +08:00
|
|
|
assert(OutputTy != types::TY_INVALID &&
|
|
|
|
"Cannot preprocess this input type!");
|
|
|
|
}
|
2014-08-29 15:25:23 +08:00
|
|
|
return llvm::make_unique<PreprocessJobAction>(std::move(Input), OutputTy);
|
2009-03-13 19:38:42 +08:00
|
|
|
}
|
2012-07-31 09:21:00 +08:00
|
|
|
case phases::Precompile: {
|
|
|
|
types::ID OutputTy = types::TY_PCH;
|
|
|
|
if (Args.hasArg(options::OPT_fsyntax_only)) {
|
|
|
|
// Syntax checks should not emit a PCH file
|
|
|
|
OutputTy = types::TY_Nothing;
|
|
|
|
}
|
2014-08-29 15:25:23 +08:00
|
|
|
return llvm::make_unique<PrecompileJobAction>(std::move(Input), OutputTy);
|
2012-07-31 09:21:00 +08:00
|
|
|
}
|
2009-03-13 19:38:42 +08:00
|
|
|
case phases::Compile: {
|
2014-08-29 15:25:23 +08:00
|
|
|
if (Args.hasArg(options::OPT_fsyntax_only))
|
|
|
|
return llvm::make_unique<CompileJobAction>(std::move(Input),
|
|
|
|
types::TY_Nothing);
|
|
|
|
if (Args.hasArg(options::OPT_rewrite_objc))
|
|
|
|
return llvm::make_unique<CompileJobAction>(std::move(Input),
|
|
|
|
types::TY_RewrittenObjC);
|
|
|
|
if (Args.hasArg(options::OPT_rewrite_legacy_objc))
|
|
|
|
return llvm::make_unique<CompileJobAction>(std::move(Input),
|
|
|
|
types::TY_RewrittenLegacyObjC);
|
|
|
|
if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto))
|
|
|
|
return llvm::make_unique<AnalyzeJobAction>(std::move(Input),
|
|
|
|
types::TY_Plist);
|
|
|
|
if (Args.hasArg(options::OPT__migrate))
|
|
|
|
return llvm::make_unique<MigrateJobAction>(std::move(Input),
|
|
|
|
types::TY_Remap);
|
|
|
|
if (Args.hasArg(options::OPT_emit_ast))
|
|
|
|
return llvm::make_unique<CompileJobAction>(std::move(Input),
|
|
|
|
types::TY_AST);
|
|
|
|
if (Args.hasArg(options::OPT_module_file_info))
|
|
|
|
return llvm::make_unique<CompileJobAction>(std::move(Input),
|
|
|
|
types::TY_ModuleFile);
|
|
|
|
if (Args.hasArg(options::OPT_verify_pch))
|
|
|
|
return llvm::make_unique<VerifyPCHJobAction>(std::move(Input),
|
|
|
|
types::TY_Nothing);
|
2014-12-21 15:00:00 +08:00
|
|
|
return llvm::make_unique<CompileJobAction>(std::move(Input),
|
|
|
|
types::TY_LLVM_BC);
|
|
|
|
}
|
|
|
|
case phases::Backend: {
|
2015-06-20 03:57:46 +08:00
|
|
|
if (IsUsingLTO(Args)) {
|
2009-09-09 07:36:43 +08:00
|
|
|
types::ID Output =
|
2015-06-26 23:47:46 +08:00
|
|
|
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
|
2014-12-21 15:00:00 +08:00
|
|
|
return llvm::make_unique<BackendJobAction>(std::move(Input), Output);
|
2014-08-29 15:25:23 +08:00
|
|
|
}
|
|
|
|
if (Args.hasArg(options::OPT_emit_llvm)) {
|
2013-08-24 05:34:57 +08:00
|
|
|
types::ID Output =
|
2015-06-26 23:47:46 +08:00
|
|
|
Args.hasArg(options::OPT_S) ? types::TY_LLVM_IR : types::TY_LLVM_BC;
|
2014-12-21 15:00:00 +08:00
|
|
|
return llvm::make_unique<BackendJobAction>(std::move(Input), Output);
|
2009-03-13 19:38:42 +08:00
|
|
|
}
|
2014-12-21 15:00:00 +08:00
|
|
|
return llvm::make_unique<BackendJobAction>(std::move(Input),
|
2014-08-29 15:25:23 +08:00
|
|
|
types::TY_PP_Asm);
|
2009-03-13 19:38:42 +08:00
|
|
|
}
|
|
|
|
case phases::Assemble:
|
2014-08-29 15:25:23 +08:00
|
|
|
return llvm::make_unique<AssembleJobAction>(std::move(Input),
|
|
|
|
types::TY_Object);
|
2009-03-13 19:38:42 +08:00
|
|
|
}
|
|
|
|
|
2011-09-23 13:06:16 +08:00
|
|
|
llvm_unreachable("invalid phase in ConstructPhaseAction");
|
2009-03-12 15:58:46 +08:00
|
|
|
}
|
|
|
|
|
2015-06-20 03:57:46 +08:00
|
|
|
bool Driver::IsUsingLTO(const ArgList &Args) const {
|
|
|
|
return Args.hasFlag(options::OPT_flto, options::OPT_fno_lto, false);
|
2011-06-22 04:55:08 +08:00
|
|
|
}
|
|
|
|
|
2009-03-18 10:55:38 +08:00
|
|
|
void Driver::BuildJobs(Compilation &C) const {
  llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");

  Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);

  // It is an error to provide a -o option if we are making multiple output
  // files.
  if (FinalOutput) {
    unsigned NumOutputs = 0;
    for (const Action *A : C.getActions())
      if (A->getType() != types::TY_Nothing)
        ++NumOutputs;

    if (NumOutputs > 1) {
      Diag(clang::diag::err_drv_output_argument_with_multiple_files);
      FinalOutput = nullptr;
    }
  }

  // Collect the list of architectures.
  llvm::StringSet<> ArchNames;
  if (C.getDefaultToolChain().getTriple().isOSBinFormatMachO())
    for (const Arg *A : C.getArgs())
      if (A->getOption().matches(options::OPT_arch))
        ArchNames.insert(A->getValue());

  for (Action *A : C.getActions()) {
    // If we are linking an image for multiple archs then the linker wants
    // -arch_multiple and -final_output <final image name>. Unfortunately, this
    // doesn't fit in cleanly because we have to pass this information down.
    //
    // FIXME: This is a hack; find a cleaner way to integrate this into the
    // process.
    const char *LinkingOutput = nullptr;
    if (isa<LipoJobAction>(A)) {
      if (FinalOutput)
        LinkingOutput = FinalOutput->getValue();
      else
        LinkingOutput = getDefaultImageName();
    }

    InputInfo II;
    BuildJobsForAction(C, A, &C.getDefaultToolChain(),
                       /*BoundArch*/ nullptr,
                       /*AtTopLevel*/ true,
                       /*MultipleArchs*/ ArchNames.size() > 1,
                       /*LinkingOutput*/ LinkingOutput, II);
  }

  // If the user passed -Qunused-arguments or there were errors, don't warn
  // about any unused arguments.
  if (Diags.hasErrorOccurred() ||
      C.getArgs().hasArg(options::OPT_Qunused_arguments))
    return;

  // Claim -### here.
  (void)C.getArgs().hasArg(options::OPT__HASH_HASH_HASH);

  // Claim --driver-mode, it was handled earlier.
  (void)C.getArgs().hasArg(options::OPT_driver_mode);

  for (Arg *A : C.getArgs()) {
    // FIXME: It would be nice to be able to send the argument to the
    // DiagnosticsEngine, so that extra values, position, and so on could be
    // printed.
    if (!A->isClaimed()) {
      if (A->getOption().hasFlag(options::NoArgumentUnused))
        continue;

      // Suppress the warning automatically if this is just a flag, and it is
      // an instance of an argument we already claimed.
      const Option &Opt = A->getOption();
      if (Opt.getKind() == Option::FlagClass) {
        bool DuplicateClaimed = false;

        for (const Arg *AA : C.getArgs().filtered(&Opt)) {
          if (AA->isClaimed()) {
            DuplicateClaimed = true;
            break;
          }
        }

        if (DuplicateClaimed)
          continue;
      }

      Diag(clang::diag::warn_drv_unused_argument)
          << A->getAsString(C.getArgs());
    }
  }
}

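// SelectToolForJob picks the tool for a job and, where possible, folds
// adjacent actions into a single invocation. As a rough illustration
// (assuming the usual pipeline for "clang -c foo.c"): preprocess -> compile
// -> backend -> assemble is normally handled by one clang job, since clang
// has an integrated preprocessor and assembler; with -save-temps the steps
// are kept as separate jobs instead.
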
static const Tool *SelectToolForJob(Compilation &C, bool SaveTemps,
                                    const ToolChain *TC, const JobAction *JA,
                                    const ActionList *&Inputs) {
  const Tool *ToolForJob = nullptr;

  // See if we should look for a compiler with an integrated assembler. We
  // match bottom up, so what we are actually looking for is an assembler job
  // with a compiler input.
  if (TC->useIntegratedAs() && !SaveTemps &&
      !C.getArgs().hasArg(options::OPT_via_file_asm) &&
      !C.getArgs().hasArg(options::OPT__SLASH_FA) &&
      !C.getArgs().hasArg(options::OPT__SLASH_Fa) &&
      isa<AssembleJobAction>(JA) && Inputs->size() == 1 &&
      isa<BackendJobAction>(*Inputs->begin())) {
    // A BackendJob is always preceded by a CompileJob, and without
    // -save-temps they will always get combined together, so instead of
    // checking the backend tool, check if the tool for the CompileJob
    // has an integrated assembler.
    const ActionList *BackendInputs = &(*Inputs)[0]->getInputs();
    JobAction *CompileJA = cast<CompileJobAction>(*BackendInputs->begin());
    const Tool *Compiler = TC->SelectTool(*CompileJA);
    if (!Compiler)
      return nullptr;
    if (Compiler->hasIntegratedAssembler()) {
      Inputs = &(*BackendInputs)[0]->getInputs();
      ToolForJob = Compiler;
    }
  }

  // A backend job should always be combined with the preceding compile job
  // unless OPT_save_temps is enabled and the compiler is capable of emitting
  // LLVM IR as an intermediate output.
  if (isa<BackendJobAction>(JA)) {
    // Check if the compiler supports emitting LLVM IR.
    assert(Inputs->size() == 1);
    JobAction *CompileJA;
    // Extract real host action, if it's a CudaHostAction.
    if (CudaHostAction *CudaHA = dyn_cast<CudaHostAction>(*Inputs->begin()))
      CompileJA = cast<CompileJobAction>(*CudaHA->begin());
    else
      CompileJA = cast<CompileJobAction>(*Inputs->begin());

    const Tool *Compiler = TC->SelectTool(*CompileJA);
    if (!Compiler)
      return nullptr;
    if (!Compiler->canEmitIR() || !SaveTemps) {
      Inputs = &(*Inputs)[0]->getInputs();
      ToolForJob = Compiler;
    }
  }

  // Otherwise use the tool for the current job.
  if (!ToolForJob)
    ToolForJob = TC->SelectTool(*JA);

  // See if we should use an integrated preprocessor. We do so when we have
  // exactly one input, since this is the only use case we care about
  // (irrelevant since we don't support combine yet).
  if (Inputs->size() == 1 && isa<PreprocessJobAction>(*Inputs->begin()) &&
      !C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
      !C.getArgs().hasArg(options::OPT_traditional_cpp) && !SaveTemps &&
      !C.getArgs().hasArg(options::OPT_rewrite_objc) &&
      ToolForJob->hasIntegratedCPP())
    Inputs = &(*Inputs)[0]->getInputs();

  return ToolForJob;
}

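// BuildJobsForAction walks the action graph bottom-up, building an InputInfo
// for each input action and then asking the selected tool to construct the
// job. With -ccc-print-bindings, the bindings are printed instead of calling
// ConstructJob(); the output looks roughly like the following line
// (illustrative only, exact triple and tool names vary):
//   # "x86_64-unknown-linux-gnu" - "clang", inputs: ["foo.c"], output: "foo.o"
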
void Driver::BuildJobsForAction(Compilation &C, const Action *A,
                                const ToolChain *TC, const char *BoundArch,
                                bool AtTopLevel, bool MultipleArchs,
                                const char *LinkingOutput,
                                InputInfo &Result) const {
  llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");

  InputInfoList CudaDeviceInputInfos;
  if (const CudaHostAction *CHA = dyn_cast<CudaHostAction>(A)) {
    InputInfo II;
    // Append outputs of device jobs to the input list.
    for (const Action *DA : CHA->getDeviceActions()) {
      BuildJobsForAction(C, DA, TC, "", AtTopLevel,
                         /*MultipleArchs*/ false, LinkingOutput, II);
      CudaDeviceInputInfos.push_back(II);
    }
    // Override current action with a real host compile action and continue
    // processing it.
    A = *CHA->begin();
  }

  if (const InputAction *IA = dyn_cast<InputAction>(A)) {
    // FIXME: It would be nice to not claim this here; maybe the old scheme of
    // just using Args was better?
    const Arg &Input = IA->getInputArg();
    Input.claim();
    if (Input.getOption().matches(options::OPT_INPUT)) {
      const char *Name = Input.getValue();
      Result = InputInfo(Name, A->getType(), Name);
    } else {
      Result = InputInfo(&Input, A->getType(), "");
    }
    return;
  }

  if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
    const ToolChain *TC;
    const char *ArchName = BAA->getArchName();

    if (ArchName)
      TC = &getToolChain(
          C.getArgs(),
          computeTargetTriple(DefaultTargetTriple, C.getArgs(), ArchName));
    else
      TC = &C.getDefaultToolChain();

    BuildJobsForAction(C, *BAA->begin(), TC, ArchName, AtTopLevel,
                       MultipleArchs, LinkingOutput, Result);
    return;
  }

  if (const CudaDeviceAction *CDA = dyn_cast<CudaDeviceAction>(A)) {
    BuildJobsForAction(
        C, *CDA->begin(),
        &getToolChain(C.getArgs(), llvm::Triple(CDA->getDeviceTriple())),
        CDA->getGpuArchName(), CDA->isAtTopLevel(),
        /*MultipleArchs*/ true, LinkingOutput, Result);
    return;
  }

  const ActionList *Inputs = &A->getInputs();

  const JobAction *JA = cast<JobAction>(A);
  const Tool *T = SelectToolForJob(C, isSaveTempsEnabled(), TC, JA, Inputs);
  if (!T)
    return;

  // Only use pipes when there is exactly one input.
  InputInfoList InputInfos;
  for (const Action *Input : *Inputs) {
    // Treat dsymutil and verify sub-jobs as being at the top-level too, they
    // shouldn't get temporary output names.
    // FIXME: Clean this up.
    bool SubJobAtTopLevel = false;
    if (AtTopLevel && (isa<DsymutilJobAction>(A) || isa<VerifyJobAction>(A)))
      SubJobAtTopLevel = true;

    InputInfo II;
    BuildJobsForAction(C, Input, TC, BoundArch, SubJobAtTopLevel, MultipleArchs,
                       LinkingOutput, II);
    InputInfos.push_back(II);
  }

  // Always use the first input as the base input.
  const char *BaseInput = InputInfos[0].getBaseInput();

  // ... except dsymutil actions, which use their actual input as the base
  // input.
  if (JA->getType() == types::TY_dSYM)
    BaseInput = InputInfos[0].getFilename();

  // Append outputs of cuda device jobs to the input list.
  if (CudaDeviceInputInfos.size())
    InputInfos.append(CudaDeviceInputInfos.begin(), CudaDeviceInputInfos.end());

  // Determine the place to write output to, if any.
  if (JA->getType() == types::TY_Nothing)
    Result = InputInfo(A->getType(), BaseInput);
  else
    Result = InputInfo(GetNamedOutputPath(C, *JA, BaseInput, BoundArch,
                                          AtTopLevel, MultipleArchs),
                       A->getType(), BaseInput);

  if (CCCPrintBindings && !CCGenDiagnostics) {
    llvm::errs() << "# \"" << T->getToolChain().getTripleString() << '"'
                 << " - \"" << T->getName() << "\", inputs: [";
    for (unsigned i = 0, e = InputInfos.size(); i != e; ++i) {
      llvm::errs() << InputInfos[i].getAsString();
      if (i + 1 != e)
        llvm::errs() << ", ";
    }
    llvm::errs() << "], output: " << Result.getAsString() << "\n";
  } else {
    T->ConstructJob(C, *JA, Result, InputInfos,
                    C.getArgsForToolChain(TC, BoundArch), LinkingOutput);
  }
}

const char *Driver::getDefaultImageName() const {
  llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
  return Target.isOSWindows() ? "a.exe" : "a.out";
}

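// Illustrative behavior of MakeCLOutputFilename below (hypothetical clang-cl
// inputs, object output):
//   value ""      with base "foo.c" -> "foo.obj"     (BaseName plus suffix)
//   value "obj\"  with base "foo.c" -> "obj\foo.obj" (directory argument)
//   value "out.o" with base "foo.c" -> "out.o"       (explicit filename kept)
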
/// \brief Create output filename based on ArgValue, which could either be a
/// full filename, filename without extension, or a directory. If ArgValue
/// does not provide a filename, then use BaseName, and use the extension
/// suitable for FileType.
static const char *MakeCLOutputFilename(const ArgList &Args, StringRef ArgValue,
                                        StringRef BaseName,
                                        types::ID FileType) {
  SmallString<128> Filename = ArgValue;

  if (ArgValue.empty()) {
    // If the argument is empty, output to BaseName in the current dir.
    Filename = BaseName;
  } else if (llvm::sys::path::is_separator(Filename.back())) {
    // If the argument is a directory, output to BaseName in that dir.
    llvm::sys::path::append(Filename, BaseName);
  }

  if (!llvm::sys::path::has_extension(ArgValue)) {
    // If the argument didn't provide an extension, then set it.
    const char *Extension = types::getTypeTempSuffix(FileType, true);

    if (FileType == types::TY_Image &&
        Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd)) {
      // The output file is a dll.
      Extension = "dll";
    }

    llvm::sys::path::replace_extension(Filename, Extension);
  }

  return Args.MakeArgString(Filename.c_str());
}

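// GetNamedOutputPath resolves where a job writes its output. Summarizing the
// logic below: an explicit -o at the top level wins; the clang-cl /P and /FA
// cases and "-" (stdout) for top-level preprocessing come next; intermediate
// results go to temporary files; everything else derives a name from the
// input, honoring /Fo, /Fe, -save-temps=obj and any bound architecture.
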
const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
                                       const char *BaseInput,
                                       const char *BoundArch, bool AtTopLevel,
                                       bool MultipleArchs) const {
  llvm::PrettyStackTraceString CrashInfo("Computing output path");
  // Output to a user requested destination?
  if (AtTopLevel && !isa<DsymutilJobAction>(JA) && !isa<VerifyJobAction>(JA)) {
    if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
      return C.addResultFile(FinalOutput->getValue(), &JA);
  }

  // For /P, preprocess to file named after BaseInput.
  if (C.getArgs().hasArg(options::OPT__SLASH_P)) {
    assert(AtTopLevel && isa<PreprocessJobAction>(JA));
    StringRef BaseName = llvm::sys::path::filename(BaseInput);
    StringRef NameArg;
    if (Arg *A = C.getArgs().getLastArg(options::OPT__SLASH_Fi))
      NameArg = A->getValue();
    return C.addResultFile(
        MakeCLOutputFilename(C.getArgs(), NameArg, BaseName, types::TY_PP_C),
        &JA);
  }

  // Default to writing to stdout?
  if (AtTopLevel && !CCGenDiagnostics &&
      (isa<PreprocessJobAction>(JA) || JA.getType() == types::TY_ModuleFile))
    return "-";

  // Is this the assembly listing for /FA?
  if (JA.getType() == types::TY_PP_Asm &&
      (C.getArgs().hasArg(options::OPT__SLASH_FA) ||
       C.getArgs().hasArg(options::OPT__SLASH_Fa))) {
    // Use /Fa and the input filename to determine the asm file name.
    StringRef BaseName = llvm::sys::path::filename(BaseInput);
    StringRef FaValue = C.getArgs().getLastArgValue(options::OPT__SLASH_Fa);
    return C.addResultFile(
        MakeCLOutputFilename(C.getArgs(), FaValue, BaseName, JA.getType()),
        &JA);
  }

  // Output to a temporary file?
  if ((!AtTopLevel && !isSaveTempsEnabled() &&
       !C.getArgs().hasArg(options::OPT__SLASH_Fo)) ||
      CCGenDiagnostics) {
    StringRef Name = llvm::sys::path::filename(BaseInput);
    std::pair<StringRef, StringRef> Split = Name.split('.');
    std::string TmpName = GetTemporaryPath(
        Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
    return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
  }

  SmallString<128> BasePath(BaseInput);
  StringRef BaseName;

  // Dsymutil actions should use the full path.
  if (isa<DsymutilJobAction>(JA) || isa<VerifyJobAction>(JA))
    BaseName = BasePath;
  else
    BaseName = llvm::sys::path::filename(BasePath);

  // Determine what the derived output name should be.
  const char *NamedOutput;

  if (JA.getType() == types::TY_Object &&
      C.getArgs().hasArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)) {
    // The /Fo or /o flag decides the object filename.
    StringRef Val =
        C.getArgs()
            .getLastArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)
            ->getValue();
    NamedOutput =
        MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Object);
  } else if (JA.getType() == types::TY_Image &&
             C.getArgs().hasArg(options::OPT__SLASH_Fe,
                                options::OPT__SLASH_o)) {
    // The /Fe or /o flag names the linked file.
    StringRef Val =
        C.getArgs()
            .getLastArg(options::OPT__SLASH_Fe, options::OPT__SLASH_o)
            ->getValue();
    NamedOutput =
        MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Image);
  } else if (JA.getType() == types::TY_Image) {
    if (IsCLMode()) {
      // clang-cl uses BaseName for the executable name.
      NamedOutput =
          MakeCLOutputFilename(C.getArgs(), "", BaseName, types::TY_Image);
    } else if (MultipleArchs && BoundArch) {
      SmallString<128> Output(getDefaultImageName());
      Output += "-";
      Output.append(BoundArch);
      NamedOutput = C.getArgs().MakeArgString(Output.c_str());
    } else
      NamedOutput = getDefaultImageName();
  } else {
    const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
    assert(Suffix && "All types used for output should have a suffix.");

    std::string::size_type End = std::string::npos;
    if (!types::appendSuffixForType(JA.getType()))
      End = BaseName.rfind('.');
    SmallString<128> Suffixed(BaseName.substr(0, End));
    if (MultipleArchs && BoundArch) {
      Suffixed += "-";
      Suffixed.append(BoundArch);
    }
    // When using both -save-temps and -emit-llvm, use a ".tmp.bc" suffix for
    // the unoptimized bitcode so that it does not get overwritten by the ".bc"
    // optimized bitcode output.
    if (!AtTopLevel && C.getArgs().hasArg(options::OPT_emit_llvm) &&
        JA.getType() == types::TY_LLVM_BC)
      Suffixed += ".tmp";
    Suffixed += '.';
    Suffixed += Suffix;
    NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
  }

  // Prepend object file path if -save-temps=obj
  if (!AtTopLevel && isSaveTempsObj() && C.getArgs().hasArg(options::OPT_o) &&
      JA.getType() != types::TY_PCH) {
    Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
    SmallString<128> TempPath(FinalOutput->getValue());
    llvm::sys::path::remove_filename(TempPath);
    StringRef OutputFileName = llvm::sys::path::filename(NamedOutput);
    llvm::sys::path::append(TempPath, OutputFileName);
    NamedOutput = C.getArgs().MakeArgString(TempPath.c_str());
  }

  // If we're saving temps and the temp file conflicts with the input file,
  // then avoid overwriting input file.
  if (!AtTopLevel && isSaveTempsEnabled() && NamedOutput == BaseName) {
    bool SameFile = false;
    SmallString<256> Result;
    llvm::sys::fs::current_path(Result);
    llvm::sys::path::append(Result, BaseName);
    llvm::sys::fs::equivalent(BaseInput, Result.c_str(), SameFile);
    // Must share the same path to conflict.
    if (SameFile) {
      StringRef Name = llvm::sys::path::filename(BaseInput);
      std::pair<StringRef, StringRef> Split = Name.split('.');
      std::string TmpName = GetTemporaryPath(
          Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
      return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
    }
  }

  // As an annoying special case, PCH generation doesn't strip the pathname.
  if (JA.getType() == types::TY_PCH) {
    llvm::sys::path::remove_filename(BasePath);
    if (BasePath.empty())
      BasePath = NamedOutput;
    else
      llvm::sys::path::append(BasePath, NamedOutput);
    return C.addResultFile(C.getArgs().MakeArgString(BasePath.c_str()), &JA);
  } else {
    return C.addResultFile(NamedOutput, &JA);
  }
}

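// Illustrative example for GetFilePath below: a -B prefix directory that
// begins with '=' is rooted in the sysroot, so a prefix of "=/usr/lib"
// combined with --sysroot=/sr makes the driver probe "/sr/usr/lib/<Name>"
// before falling back to the resource directory and the toolchain file paths.
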
std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
  // Respect a limited subset of the '-Bprefix' functionality in GCC by
  // attempting to use this prefix when looking for file paths.
  for (const std::string &Dir : PrefixDirs) {
    if (Dir.empty())
      continue;
    SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
    llvm::sys::path::append(P, Name);
    if (llvm::sys::fs::exists(Twine(P)))
      return P.str();
  }

  SmallString<128> P(ResourceDir);
  llvm::sys::path::append(P, Name);
  if (llvm::sys::fs::exists(Twine(P)))
    return P.str();

  for (const std::string &Dir : TC.getFilePaths()) {
    if (Dir.empty())
      continue;
    SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
    llvm::sys::path::append(P, Name);
    if (llvm::sys::fs::exists(Twine(P)))
      return P.str();
  }

  return Name;
}

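// Illustrative example for generatePrefixedToolNames below: with
// DefaultTargetTriple == "x86_64-unknown-linux-gnu" and Tool == "ld", the
// candidate names are "x86_64-unknown-linux-gnu-ld" followed by plain "ld";
// ScanDirForExecutable then probes a directory for each candidate in turn.
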
void Driver::generatePrefixedToolNames(
    const char *Tool, const ToolChain &TC,
    SmallVectorImpl<std::string> &Names) const {
  // FIXME: Needs a better variable than DefaultTargetTriple
  Names.emplace_back(DefaultTargetTriple + "-" + Tool);
  Names.emplace_back(Tool);
}

static bool ScanDirForExecutable(SmallString<128> &Dir,
                                 ArrayRef<std::string> Names) {
  for (const auto &Name : Names) {
    llvm::sys::path::append(Dir, Name);
    if (llvm::sys::fs::can_execute(Twine(Dir)))
      return true;
    llvm::sys::path::remove_filename(Dir);
  }
  return false;
}

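// GetProgramPath search order (summarizing the code below): -B prefix
// directories first, then the toolchain's program paths, and finally the
// PATH environment via llvm::sys::findProgramByName(); if nothing matches,
// the bare name is returned unchanged.
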
std::string Driver::GetProgramPath(const char *Name,
                                   const ToolChain &TC) const {
  SmallVector<std::string, 2> TargetSpecificExecutables;
  generatePrefixedToolNames(Name, TC, TargetSpecificExecutables);

  // Respect a limited subset of the '-Bprefix' functionality in GCC by
  // attempting to use this prefix when looking for program paths.
  for (const auto &PrefixDir : PrefixDirs) {
    if (llvm::sys::fs::is_directory(PrefixDir)) {
      SmallString<128> P(PrefixDir);
      if (ScanDirForExecutable(P, TargetSpecificExecutables))
        return P.str();
    } else {
      SmallString<128> P(PrefixDir + Name);
      if (llvm::sys::fs::can_execute(Twine(P)))
        return P.str();
    }
  }

  const ToolChain::path_list &List = TC.getProgramPaths();
  for (const auto &Path : List) {
    SmallString<128> P(Path);
    if (ScanDirForExecutable(P, TargetSpecificExecutables))
      return P.str();
  }

  // If all else failed, search the path.
  for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
    if (llvm::ErrorOr<std::string> P =
            llvm::sys::findProgramByName(TargetSpecificExecutable))
      return *P;

  return Name;
}

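// Illustrative example for GetTemporaryPath below (exact names are platform
// and run dependent): GetTemporaryPath("foo", "o") typically yields a path
// such as "/tmp/foo-1a2b3c.o", created via llvm::sys::fs::createTemporaryFile.
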
std::string Driver::GetTemporaryPath(StringRef Prefix,
                                     const char *Suffix) const {
  SmallString<128> Path;
  std::error_code EC = llvm::sys::fs::createTemporaryFile(Prefix, Suffix, Path);
  if (EC) {
    Diag(clang::diag::err_unable_to_make_temp) << EC.message();
    return "";
  }

  return Path.str();
}

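// getToolChain caches one ToolChain per full target triple string, so two
// different triples (for example the hypothetical "x86_64-apple-darwin14"
// and "i386-apple-darwin14") get distinct ToolChain objects even within a
// single compilation.
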
const ToolChain &Driver::getToolChain(const ArgList &Args,
|
2015-07-11 03:47:55 +08:00
|
|
|
const llvm::Triple &Target) const {
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
|
2012-01-31 10:21:20 +08:00
|
|
|
ToolChain *&TC = ToolChains[Target.str()];
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
if (!TC) {
|
|
|
|
switch (Target.getOS()) {
|
2015-03-26 19:13:44 +08:00
|
|
|
case llvm::Triple::CloudABI:
|
|
|
|
TC = new toolchains::CloudABI(*this, Target, Args);
|
|
|
|
break;
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
case llvm::Triple::Darwin:
|
|
|
|
case llvm::Triple::MacOSX:
|
|
|
|
case llvm::Triple::IOS:
|
2013-11-25 07:28:23 +08:00
|
|
|
TC = new toolchains::DarwinClang(*this, Target, Args);
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
break;
|
|
|
|
case llvm::Triple::DragonFly:
|
2012-02-19 09:38:32 +08:00
|
|
|
TC = new toolchains::DragonFly(*this, Target, Args);
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
break;
|
|
|
|
case llvm::Triple::OpenBSD:
|
2012-02-19 09:38:32 +08:00
|
|
|
TC = new toolchains::OpenBSD(*this, Target, Args);
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
break;
|
2012-08-09 07:57:20 +08:00
|
|
|
case llvm::Triple::Bitrig:
|
|
|
|
TC = new toolchains::Bitrig(*this, Target, Args);
|
|
|
|
break;
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
case llvm::Triple::NetBSD:
|
2012-02-19 09:38:32 +08:00
|
|
|
TC = new toolchains::NetBSD(*this, Target, Args);
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
break;
|
|
|
|
case llvm::Triple::FreeBSD:
|
2012-02-19 09:38:32 +08:00
|
|
|
TC = new toolchains::FreeBSD(*this, Target, Args);
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
break;
|
|
|
|
case llvm::Triple::Minix:
|
2012-02-19 09:38:32 +08:00
|
|
|
TC = new toolchains::Minix(*this, Target, Args);
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
break;
|
|
|
|
case llvm::Triple::Linux:
|
2012-01-26 05:03:58 +08:00
|
|
|
if (Target.getArch() == llvm::Triple::hexagon)
|
2012-12-06 20:43:18 +08:00
|
|
|
TC = new toolchains::Hexagon_TC(*this, Target, Args);
|
2012-01-26 05:03:58 +08:00
|
|
|
else
|
2012-02-19 09:38:32 +08:00
|
|
|
TC = new toolchains::Linux(*this, Target, Args);
|
Delete the driver's HostInfo class. This abstraction just never really
did anything. The two big pieces of functionality it tried to provide
was to cache the ToolChain objects for each target, and to figure out
the exact target based on the flag set coming in to an invocation.
However, it had a lot of flaws even with those goals:
- Neither of these have anything to do with the host, or its info.
- The HostInfo class was setup as a full blown class *hierarchy* with
a separate implementation for each "host" OS. This required
dispatching just to create the objects in the first place.
- The hierarchy claimed to represent the host, when in fact it was
based on the target OS.
- Each leaf in the hierarchy was responsible for implementing the flag
processing and caching, resulting in a *lot* of copy-paste code and
quite a few bugs.
- The caching was consistently done based on architecture alone, even
though *any* aspect of the targeted triple might change the behavior
of the configured toolchain.
- Flag processing was already being done in the Driver proper,
separating the flag handling even more than it already is.
Instead of this, we can simply have the dispatch logic in the Driver
which previously created a HostInfo object create the ToolChain objects.
Adding caching in the Driver layer is a tiny amount of code. Finally,
pulling the flag processing into the Driver puts it where it belongs and
consolidates it in one location.
The result is that two functions, and maybe 100 lines of new code
replace over 10 classes and 800 lines of code. Woot.
This also paves the way to introduce more detailed ToolChain objects for
various OSes without threading through a new HostInfo type as well, and
the accompanying boiler plate. That, of course, was the yak I started to
shave that began this entire refactoring escapade. Wheee!
llvm-svn: 148950
2012-01-25 19:01:57 +08:00
|
|
|
break;
|
2015-03-31 04:31:33 +08:00
|
|
|
case llvm::Triple::NaCl:
|
|
|
|
TC = new toolchains::NaCl_TC(*this, Target, Args);
|
|
|
|
break;
|
2012-02-15 21:39:01 +08:00
|
|
|
case llvm::Triple::Solaris:
|
2012-02-19 09:38:32 +08:00
|
|
|
TC = new toolchains::Solaris(*this, Target, Args);
|
2012-02-15 21:39:01 +08:00
|
|
|
break;
|
2015-07-18 09:49:05 +08:00
|
|
|
case llvm::Triple::AMDHSA:
|
|
|
|
TC = new toolchains::AMDGPUToolChain(*this, Target, Args);
|
|
|
|
break;
|
    case llvm::Triple::Win32:
      switch (Target.getEnvironment()) {
      default:
        if (Target.isOSBinFormatELF())
          TC = new toolchains::Generic_ELF(*this, Target, Args);
        else if (Target.isOSBinFormatMachO())
          TC = new toolchains::MachO(*this, Target, Args);
        else
          TC = new toolchains::Generic_GCC(*this, Target, Args);
        break;
      case llvm::Triple::GNU:
        TC = new toolchains::MinGW(*this, Target, Args);
        break;
      case llvm::Triple::Itanium:
        TC = new toolchains::CrossWindowsToolChain(*this, Target, Args);
        break;
      case llvm::Triple::MSVC:
      case llvm::Triple::UnknownEnvironment:
        TC = new toolchains::MSVCToolChain(*this, Target, Args);
        break;
      }
      break;
    case llvm::Triple::CUDA:
      TC = new toolchains::CudaToolChain(*this, Target, Args);
      break;
    default:
      // Of these targets, Hexagon is the only one that might have
      // an OS of Linux, in which case it got handled above already.
      if (Target.getArchName() == "tce")
        TC = new toolchains::TCEToolChain(*this, Target, Args);
      else if (Target.getArch() == llvm::Triple::hexagon)
        TC = new toolchains::Hexagon_TC(*this, Target, Args);
      else if (Target.getArch() == llvm::Triple::xcore)
        TC = new toolchains::XCore(*this, Target, Args);
      else if (Target.getArch() == llvm::Triple::shave)
        TC = new toolchains::SHAVEToolChain(*this, Target, Args);
      else if (Target.isOSBinFormatELF())
        TC = new toolchains::Generic_ELF(*this, Target, Args);
      else if (Target.isOSBinFormatMachO())
        TC = new toolchains::MachO(*this, Target, Args);
      else
        TC = new toolchains::Generic_GCC(*this, Target, Args);
      break;
    }
  }
  return *TC;
}
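The refactoring described above leaves ToolChain construction and caching to the
Driver itself, keyed on the full target triple string. Below is a minimal,
self-contained sketch of that caching pattern; the Sketch* names are hypothetical
stand-ins, not the real Driver or ToolChain classes (ownership/cleanup is omitted
for brevity).

#include "llvm/ADT/StringMap.h"
#include <string>

// Hypothetical stand-ins for the real Driver/ToolChain types; this only
// illustrates the triple-keyed, construct-once caching described above.
struct SketchToolChain {
  explicit SketchToolChain(const std::string &TripleStr) : Triple(TripleStr) {}
  std::string Triple;
};

class SketchDriver {
  // Keyed on the full triple string, so *any* triple component change
  // (arch, vendor, OS, environment) yields a separate cache entry.
  llvm::StringMap<SketchToolChain *> ToolChains;

public:
  const SketchToolChain &getToolChain(const std::string &TripleStr) {
    SketchToolChain *&TC = ToolChains[TripleStr];
    if (!TC)
      TC = new SketchToolChain(TripleStr); // constructed once, reused after
    return *TC;
  }
};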
bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
  // Say "no" if there is not exactly one input of a type clang understands.
  if (JA.size() != 1 || !types::isAcceptedByClang((*JA.begin())->getType()))
    return false;
Reapply "Change -save-temps to emit unoptimized bitcode files."

This reapplies r224503 along with a fix for compiling Fortran by having the
clang driver invoke gcc (see r224546, where it was reverted). I have added
a testcase for that as well.

Original commit message:

It is often convenient to use -save-temps to collect the intermediate
results of a compilation, e.g., when triaging a bug report. Besides the
temporary files for preprocessed source and assembly code, this adds the
unoptimized bitcode files as well.

This adds a new BackendJobAction, which is mostly mechanical, to run after
the CompileJobAction. When not using -save-temps, the BackendJobAction is
combined into one job with the CompileJobAction, similar to the way the
integrated assembler is handled. I've implemented this entirely as a
driver change, so under the hood, it is just using -disable-llvm-optzns
to get the unoptimized bitcode.

Based in part on a patch by Steven Wu.
rdar://problem/18909437
llvm-svn: 224688

  // And say "no" if this is not a kind of action clang understands.
  if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
      !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
    return false;

  return true;
}
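The isa<> checks above rely on LLVM-style RTTI rather than C++ RTTI: each
concrete action class exposes a classof() predicate over a kind tag. Below is a
small, self-contained sketch of that mechanism; the Sketch* class and kind names
are hypothetical, not the real clang::driver::Action hierarchy.

#include "llvm/Support/Casting.h"

// Hypothetical base class with an explicit kind tag, mirroring how the real
// Action hierarchy enables isa<>/dyn_cast<> without virtual type queries.
class SketchAction {
public:
  enum ActionKind { PreprocessKind, CompileKind, LinkKind };
  explicit SketchAction(ActionKind K) : Kind(K) {}
  ActionKind getKind() const { return Kind; }

private:
  ActionKind Kind;
};

class SketchCompileAction : public SketchAction {
public:
  SketchCompileAction() : SketchAction(CompileKind) {}
  // classof() is what llvm::isa<SketchCompileAction>(...) consults.
  static bool classof(const SketchAction *A) {
    return A->getKind() == CompileKind;
  }
};

// Mirrors the shape of ShouldUseClangCompiler: accept only kinds we know.
static bool sketchShouldHandle(const SketchAction &A) {
  return llvm::isa<SketchCompileAction>(A);
}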

/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and return the
/// grouped values as integers. Numbers which are not provided are set to 0.
///
/// \return True if the entire string was parsed (9.2), or all groups were
/// parsed (10.3.5extrastuff).
bool Driver::GetReleaseVersion(const char *Str, unsigned &Major,
                               unsigned &Minor, unsigned &Micro,
                               bool &HadExtra) {
  HadExtra = false;

  Major = Minor = Micro = 0;
  if (*Str == '\0')
    return false;

  char *End;
  Major = (unsigned)strtol(Str, &End, 10);
  if (*Str != '\0' && *End == '\0')
    return true;
  if (*End != '.')
    return false;

  Str = End + 1;
  Minor = (unsigned)strtol(Str, &End, 10);
  if (*Str != '\0' && *End == '\0')
    return true;
  if (*End != '.')
    return false;

  Str = End + 1;
  Micro = (unsigned)strtol(Str, &End, 10);
  if (*Str != '\0' && *End == '\0')
    return true;
  if (Str == End)
    return false;
  HadExtra = true;
  return true;
}
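A short usage sketch of the contract documented above; the expected values in
the comments follow the doc comment, and the wrapper function is illustrative,
not a test from the tree.

#include "clang/Driver/Driver.h"

void releaseVersionExamples() {
  unsigned Major = 0, Minor = 0, Micro = 0;
  bool HadExtra = false;

  // Entire string parsed: returns true with Major=9, Minor=2, Micro=0,
  // HadExtra=false.
  clang::driver::Driver::GetReleaseVersion("9.2", Major, Minor, Micro,
                                           HadExtra);

  // All groups parsed with trailing text: returns true with Major=10,
  // Minor=3, Micro=5, HadExtra=true.
  clang::driver::Driver::GetReleaseVersion("10.3.5extrastuff", Major, Minor,
                                           Micro, HadExtra);
}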
std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks() const {
  unsigned IncludedFlagsBitmask = 0;
  unsigned ExcludedFlagsBitmask = options::NoDriverOption;

  if (Mode == CLMode) {
    // Include CL and Core options.
    IncludedFlagsBitmask |= options::CLOption;
    IncludedFlagsBitmask |= options::CoreOption;
  } else {
    ExcludedFlagsBitmask |= options::CLOption;
  }

  return std::make_pair(IncludedFlagsBitmask, ExcludedFlagsBitmask);
}
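These masks are consumed by the option-table machinery when deciding which
options to parse or print. A hedged sketch of the filtering rule they encode
follows; the helper name is hypothetical, and Flags stands for a single
option's flag bits.

// Hypothetical helper showing how an include/exclude mask pair is applied
// to one option's flag bits: exclusion always wins, and an include mask of
// zero means "no restriction".
static bool sketchOptionIsVisible(unsigned Flags, unsigned IncludedFlagsBitmask,
                                  unsigned ExcludedFlagsBitmask) {
  if (Flags & ExcludedFlagsBitmask)
    return false;
  if (IncludedFlagsBitmask && !(Flags & IncludedFlagsBitmask))
    return false;
  return true;
}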
bool clang::driver::isOptimizationLevelFast(const ArgList &Args) {
  return Args.hasFlag(options::OPT_Ofast, options::OPT_O_Group, false);
}