//===--- SourceManager.cpp - Track and cache source files -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SourceManager interface.
//
//===----------------------------------------------------------------------===//

#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstring>
#include <string>

using namespace clang;
using namespace SrcMgr;
using llvm::MemoryBuffer;

//===----------------------------------------------------------------------===//
// SourceManager Helper Classes
//===----------------------------------------------------------------------===//

ContentCache::~ContentCache() {
  if (shouldFreeBuffer())
    delete Buffer.getPointer();
}

/// getSizeBytesMapped - Returns the number of bytes actually mapped for this
/// ContentCache. This can be 0 if the MemBuffer was not actually expanded.
unsigned ContentCache::getSizeBytesMapped() const {
  return Buffer.getPointer() ? Buffer.getPointer()->getBufferSize() : 0;
}

/// Returns the kind of memory used to back the memory buffer for
/// this content cache. This is used for performance analysis.
llvm::MemoryBuffer::BufferKind ContentCache::getMemoryBufferKind() const {
  assert(Buffer.getPointer());

  // Should be unreachable, but keep for sanity.
  if (!Buffer.getPointer())
    return llvm::MemoryBuffer::MemoryBuffer_Malloc;

  llvm::MemoryBuffer *buf = Buffer.getPointer();
  return buf->getBufferKind();
}

/// getSize - Returns the size of the content encapsulated by this ContentCache.
/// This can be the size of the source file or the size of an arbitrary
/// scratch buffer. If the ContentCache encapsulates a source file, that
/// file is not lazily brought in from disk to satisfy this query.
unsigned ContentCache::getSize() const {
  return Buffer.getPointer() ? (unsigned) Buffer.getPointer()->getBufferSize()
                             : (unsigned) ContentsEntry->getSize();
}

void ContentCache::replaceBuffer(llvm::MemoryBuffer *B, bool DoNotFree) {
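  // Replacing the buffer with itself is almost certainly a bug; assert in
  // +Asserts builds, and otherwise just refresh the ownership flag and return
  // so we do not delete a buffer that is still in use.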
  if (B && B == Buffer.getPointer()) {
    assert(0 && "Replacing with the same buffer");
    Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
    return;
  }

  if (shouldFreeBuffer())
    delete Buffer.getPointer();
  Buffer.setPointer(B);
  Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
}

llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
                                            const SourceManager &SM,
                                            SourceLocation Loc,
                                            bool *Invalid) const {
  // Lazily create the Buffer for ContentCaches that wrap files. If we already
  // computed it, just return what we have.
  if (Buffer.getPointer() || !ContentsEntry) {
    if (Invalid)
      *Invalid = isBufferInvalid();

    return Buffer.getPointer();
  }

  bool isVolatile = SM.userFilesAreVolatile() && !IsSystemFile;
  auto BufferOrError =
      SM.getFileManager().getBufferForFile(ContentsEntry, isVolatile);

  // If we were unable to open the file, then we are in an inconsistent
  // situation where the content cache referenced a file which no longer
  // exists. Most likely, we were using a stat cache with an invalid entry but
  // the file could also have been removed during processing. Since we can't
  // really deal with this situation, just create an empty buffer.
  //
  // FIXME: This is definitely not ideal, but our immediate clients can't
  // currently handle returning a null entry here. Ideally we should detect
  // that we are in an inconsistent situation and error out as quickly as
  // possible.
  if (!BufferOrError) {
    StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
    Buffer.setPointer(MemoryBuffer::getNewUninitMemBuffer(
                          ContentsEntry->getSize(), "<invalid>").release());
    char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
    for (unsigned i = 0, e = ContentsEntry->getSize(); i != e; ++i)
      Ptr[i] = FillStr[i % FillStr.size()];

    if (Diag.isDiagnosticInFlight())
      Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
                                ContentsEntry->getName(),
                                BufferOrError.getError().message());
    else
      Diag.Report(Loc, diag::err_cannot_open_file)
          << ContentsEntry->getName() << BufferOrError.getError().message();

    Buffer.setInt(Buffer.getInt() | InvalidFlag);

    if (Invalid) *Invalid = true;
    return Buffer.getPointer();
  }

  Buffer.setPointer(BufferOrError->release());

  // Check that the file's size is the same as in the file entry (which may
  // have come from a stat cache).
  if (getRawBuffer()->getBufferSize() != (size_t)ContentsEntry->getSize()) {
    if (Diag.isDiagnosticInFlight())
      Diag.SetDelayedDiagnostic(diag::err_file_modified,
                                ContentsEntry->getName());
    else
      Diag.Report(Loc, diag::err_file_modified)
          << ContentsEntry->getName();

    Buffer.setInt(Buffer.getInt() | InvalidFlag);
    if (Invalid) *Invalid = true;
    return Buffer.getPointer();
  }

  // If the buffer is valid, check to see if it has a UTF Byte Order Mark
  // (BOM). We only support UTF-8 with and without a BOM right now. See
  // http://en.wikipedia.org/wiki/Byte_order_mark for more information.
  StringRef BufStr = Buffer.getPointer()->getBuffer();
  const char *InvalidBOM = llvm::StringSwitch<const char *>(BufStr)
    .StartsWith("\xFE\xFF", "UTF-16 (BE)")
    .StartsWith("\xFF\xFE", "UTF-16 (LE)")
    .StartsWith("\x00\x00\xFE\xFF", "UTF-32 (BE)")
    .StartsWith("\xFF\xFE\x00\x00", "UTF-32 (LE)")
    .StartsWith("\x2B\x2F\x76", "UTF-7")
    .StartsWith("\xF7\x64\x4C", "UTF-1")
    .StartsWith("\xDD\x73\x66\x73", "UTF-EBCDIC")
    .StartsWith("\x0E\xFE\xFF", "SCSU")
    .StartsWith("\xFB\xEE\x28", "BOCU-1")
    .StartsWith("\x84\x31\x95\x33", "GB-18030")
    .Default(nullptr);

  if (InvalidBOM) {
    Diag.Report(Loc, diag::err_unsupported_bom)
      << InvalidBOM << ContentsEntry->getName();
    Buffer.setInt(Buffer.getInt() | InvalidFlag);
  }

  if (Invalid)
    *Invalid = isBufferInvalid();

  return Buffer.getPointer();
}

unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
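  // Look the name up in the map, assigning the next sequential ID if it has
  // not been seen before; FilenamesByID keeps the entries in ID order so an
  // ID can be mapped back to its filename.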
  auto IterBool =
      FilenameIDs.insert(std::make_pair(Name, FilenamesByID.size()));
  if (IterBool.second)
    FilenamesByID.push_back(&*IterBool.first);
  return IterBool.first->second;
}

/// AddLineNote - Add a line note to the line table that indicates that there
/// is a \#line at the specified FID/Offset location which changes the presumed
/// location to LineNo/FilenameID.
void LineTableInfo::AddLineNote(FileID FID, unsigned Offset,
                                unsigned LineNo, int FilenameID) {
  std::vector<LineEntry> &Entries = LineEntries[FID];

  assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
         "Adding line entries out of order!");

  SrcMgr::CharacteristicKind Kind = SrcMgr::C_User;
  unsigned IncludeOffset = 0;

  if (!Entries.empty()) {
    // If this is a '#line 4' after '#line 42 "foo.h"', make sure to remember
    // that we are still in "foo.h".
    if (FilenameID == -1)
      FilenameID = Entries.back().FilenameID;

    // If we are after a line marker that switched us to system header mode, or
    // that set #include information, preserve it.
    Kind = Entries.back().FileKind;
    IncludeOffset = Entries.back().IncludeOffset;
  }

  Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, Kind,
                                   IncludeOffset));
}

/// AddLineNote This is the same as the previous version of AddLineNote, but is
/// used for GNU line markers. If EntryExit is 0, then this doesn't change the
/// presumed \#include stack. If it is 1, this is a file entry, if it is 2 then
/// this is a file exit. FileKind specifies whether this is a system header or
/// extern C system header.
void LineTableInfo::AddLineNote(FileID FID, unsigned Offset,
                                unsigned LineNo, int FilenameID,
                                unsigned EntryExit,
                                SrcMgr::CharacteristicKind FileKind) {
  assert(FilenameID != -1 && "Unspecified filename should use other accessor");

  std::vector<LineEntry> &Entries = LineEntries[FID];

  assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
         "Adding line entries out of order!");

  unsigned IncludeOffset = 0;
  if (EntryExit == 0) {  // No #include stack change.
    IncludeOffset = Entries.empty() ? 0 : Entries.back().IncludeOffset;
  } else if (EntryExit == 1) {
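    // A file-entry marker starts a new presumed #include; record the offset
    // just before this directive as the include point.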
    IncludeOffset = Offset-1;
  } else if (EntryExit == 2) {
    assert(!Entries.empty() && Entries.back().IncludeOffset &&
       "PPDirectives should have caught case when popping empty include stack");

    // Get the include loc of the last entries' include loc as our include loc.
    IncludeOffset = 0;
    if (const LineEntry *PrevEntry =
          FindNearestLineEntry(FID, Entries.back().IncludeOffset))
      IncludeOffset = PrevEntry->IncludeOffset;
  }

  Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, FileKind,
                                   IncludeOffset));
}

/// FindNearestLineEntry - Find the line entry nearest to FID that is before
/// it. If there is no line entry before Offset in FID, return null.
const LineEntry *LineTableInfo::FindNearestLineEntry(FileID FID,
                                                     unsigned Offset) {
  const std::vector<LineEntry> &Entries = LineEntries[FID];
  assert(!Entries.empty() && "No #line entries for this FID after all!");

  // It is very common for the query to be after the last #line, check this
  // first.
  if (Entries.back().FileOffset <= Offset)
    return &Entries.back();

  // Do a binary search to find the maximal element that is still before Offset.
  std::vector<LineEntry>::const_iterator I =
    std::upper_bound(Entries.begin(), Entries.end(), Offset);
  if (I == Entries.begin()) return nullptr;
  return &*--I;
}

/// \brief Add a new line entry that has already been encoded into
/// the internal representation of the line table.
void LineTableInfo::AddEntry(FileID FID,
                             const std::vector<LineEntry> &Entries) {
  LineEntries[FID] = Entries;
}

/// getLineTableFilenameID - Return the uniqued ID for the specified filename.
///
unsigned SourceManager::getLineTableFilenameID(StringRef Name) {
  return getLineTable().getLineTableFilenameID(Name);
}

/// AddLineNote - Add a line note to the line table for the FileID and offset
/// specified by Loc. If FilenameID is -1, it is considered to be
/// unspecified.
void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
                                int FilenameID) {
  std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);

  bool Invalid = false;
  const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
  if (!Entry.isFile() || Invalid)
    return;

  const SrcMgr::FileInfo &FileInfo = Entry.getFile();

  // Remember that this file has #line directives now if it doesn't already.
  const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();

  getLineTable().AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID);
}

/// AddLineNote - Add a GNU line marker to the line table.
void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
                                int FilenameID, bool IsFileEntry,
                                bool IsFileExit, bool IsSystemHeader,
                                bool IsExternCHeader) {
  // If there is no filename and no flags, this is treated just like a #line,
  // which does not change the flags of the previous line marker.
  if (FilenameID == -1) {
    assert(!IsFileEntry && !IsFileExit && !IsSystemHeader && !IsExternCHeader &&
           "Can't set flags without setting the filename!");
    return AddLineNote(Loc, LineNo, FilenameID);
  }

  std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);

  bool Invalid = false;
  const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
  if (!Entry.isFile() || Invalid)
    return;

  const SrcMgr::FileInfo &FileInfo = Entry.getFile();

  // Remember that this file has #line directives now if it doesn't already.
  const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();

  (void) getLineTable();

  SrcMgr::CharacteristicKind FileKind;
  if (IsExternCHeader)
    FileKind = SrcMgr::C_ExternCSystem;
  else if (IsSystemHeader)
    FileKind = SrcMgr::C_System;
  else
    FileKind = SrcMgr::C_User;

  unsigned EntryExit = 0;
  if (IsFileEntry)
    EntryExit = 1;
  else if (IsFileExit)
    EntryExit = 2;

  LineTable->AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID,
                         EntryExit, FileKind);
}

LineTableInfo &SourceManager::getLineTable() {
  if (!LineTable)
    LineTable = new LineTableInfo();
  return *LineTable;
}

//===----------------------------------------------------------------------===//
// Private 'Create' methods.
//===----------------------------------------------------------------------===//

SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
                             bool UserFilesAreVolatile)
  : Diag(Diag), FileMgr(FileMgr), OverridenFilesKeepOriginalName(true),
    UserFilesAreVolatile(UserFilesAreVolatile), FilesAreTransient(false),
    ExternalSLocEntries(nullptr), LineTable(nullptr), NumLinearScans(0),
    NumBinaryProbes(0) {
  clearIDTables();
  Diag.setSourceManager(this);
}

SourceManager::~SourceManager() {
  delete LineTable;

  // Delete FileEntry objects corresponding to content caches. Since the actual
  // content cache objects are bump pointer allocated, we just have to run the
  // dtors, but we call the deallocate method for completeness.
  for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i) {
    if (MemBufferInfos[i]) {
      MemBufferInfos[i]->~ContentCache();
      ContentCacheAlloc.Deallocate(MemBufferInfos[i]);
    }
  }
  for (llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>::iterator
       I = FileInfos.begin(), E = FileInfos.end(); I != E; ++I) {
    if (I->second) {
      I->second->~ContentCache();
      ContentCacheAlloc.Deallocate(I->second);
    }
  }

  llvm::DeleteContainerSeconds(MacroArgsCacheMap);
}

void SourceManager::clearIDTables() {
  MainFileID = FileID();
  LocalSLocEntryTable.clear();
  LoadedSLocEntryTable.clear();
  SLocEntryLoaded.clear();
  LastLineNoFileIDQuery = FileID();
  LastLineNoContentCache = nullptr;
  LastFileIDLookup = FileID();

  if (LineTable)
    LineTable->clear();

  // Use up FileID #0 as an invalid expansion.
  NextLocalOffset = 0;
  CurrentLoadedOffset = MaxLoadedOffset;
  createExpansionLoc(SourceLocation(),SourceLocation(),SourceLocation(), 1);
}

/// getOrCreateContentCache - Create or return a cached ContentCache for the
/// specified file.
const ContentCache *
SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
                                       bool isSystemFile) {
  assert(FileEnt && "Didn't specify a file entry to use?");

  // Do we already have information about this file?
  ContentCache *&Entry = FileInfos[FileEnt];
  if (Entry) return Entry;

  // Nope, create a new Cache entry.
  Entry = ContentCacheAlloc.Allocate<ContentCache>();

  if (OverriddenFilesInfo) {
    // If the file contents are overridden with contents from another file,
    // pass that file to ContentCache.
    llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
        overI = OverriddenFilesInfo->OverriddenFiles.find(FileEnt);
    if (overI == OverriddenFilesInfo->OverriddenFiles.end())
      new (Entry) ContentCache(FileEnt);
    else
      new (Entry) ContentCache(OverridenFilesKeepOriginalName ? FileEnt
                                                              : overI->second,
                               overI->second);
  } else {
    new (Entry) ContentCache(FileEnt);
  }

  Entry->IsSystemFile = isSystemFile;
  Entry->IsTransient = FilesAreTransient;

  return Entry;
}

/// createMemBufferContentCache - Create a new ContentCache for the specified
/// memory buffer. This does no caching.
const ContentCache *SourceManager::createMemBufferContentCache(
    std::unique_ptr<llvm::MemoryBuffer> Buffer) {
  // Add a new ContentCache to the MemBufferInfos list and return it.
  ContentCache *Entry = ContentCacheAlloc.Allocate<ContentCache>();
  new (Entry) ContentCache();
  MemBufferInfos.push_back(Entry);
  Entry->setBuffer(std::move(Buffer));
  return Entry;
}

const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
                                                      bool *Invalid) const {
  assert(!SLocEntryLoaded[Index]);
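  // Loaded (AST/PCH) entries use negative FileIDs: table index I corresponds
  // to FileID -(I+2), since ID -1 is reserved as a sentinel value.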
  if (ExternalSLocEntries->ReadSLocEntry(-(static_cast<int>(Index) + 2))) {
    if (Invalid)
      *Invalid = true;
    // If the file of the SLocEntry changed we could still have loaded it.
    if (!SLocEntryLoaded[Index]) {
      // Try to recover; create a SLocEntry so the rest of clang can handle it.
      LoadedSLocEntryTable[Index] = SLocEntry::get(0,
                 FileInfo::get(SourceLocation(),
                               getFakeContentCacheForRecovery(),
                               SrcMgr::C_User));
    }
  }

  return LoadedSLocEntryTable[Index];
}

std::pair<int, unsigned>
SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
                                         unsigned TotalSize) {
  assert(ExternalSLocEntries && "Don't have an external sloc source");
  // Make sure we're not about to run out of source locations.
  if (CurrentLoadedOffset - TotalSize < NextLocalOffset)
    return std::make_pair(0, 0);
  LoadedSLocEntryTable.resize(LoadedSLocEntryTable.size() + NumSLocEntries);
  SLocEntryLoaded.resize(LoadedSLocEntryTable.size());
  CurrentLoadedOffset -= TotalSize;
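  // Loaded entries are carved out of the top of the source location address
  // space: their offsets grow downward from MaxLoadedOffset toward the local
  // entries, which grow upward from offset 0.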
  int ID = LoadedSLocEntryTable.size();
  return std::make_pair(-ID - 1, CurrentLoadedOffset);
}

/// \brief As part of recovering from missing or changed content, produce a
/// fake, non-empty buffer.
llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
  if (!FakeBufferForRecovery)
    FakeBufferForRecovery =
        llvm::MemoryBuffer::getMemBuffer("<<<INVALID BUFFER>>");

  return FakeBufferForRecovery.get();
}

/// \brief As part of recovering from missing or changed content, produce a
/// fake content cache.
const SrcMgr::ContentCache *
SourceManager::getFakeContentCacheForRecovery() const {
  if (!FakeContentCacheForRecovery) {
    FakeContentCacheForRecovery = llvm::make_unique<SrcMgr::ContentCache>();
    FakeContentCacheForRecovery->replaceBuffer(getFakeBufferForRecovery(),
                                               /*DoNotFree=*/true);
  }
  return FakeContentCacheForRecovery.get();
}

/// \brief Returns the previous in-order FileID or an invalid FileID if there
/// is no previous one.
FileID SourceManager::getPreviousFileID(FileID FID) const {
  if (FID.isInvalid())
    return FileID();

  int ID = FID.ID;
  if (ID == -1)
    return FileID();
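  // Local FileIDs are positive and grow upward; loaded FileIDs are negative
  // and grow downward, so stepping back must stay within the matching table.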
  if (ID > 0) {
    if (ID-1 == 0)
      return FileID();
  } else if (unsigned(-(ID-1) - 2) >= LoadedSLocEntryTable.size()) {
    return FileID();
  }

  return FileID::get(ID-1);
}

/// \brief Returns the next in-order FileID or an invalid FileID if there is
/// no next one.
FileID SourceManager::getNextFileID(FileID FID) const {
  if (FID.isInvalid())
    return FileID();

  int ID = FID.ID;
  if (ID > 0) {
    if (unsigned(ID+1) >= local_sloc_entry_size())
      return FileID();
  } else if (ID+1 >= -1) {
    return FileID();
  }

  return FileID::get(ID+1);
}

//===----------------------------------------------------------------------===//
// Methods to create new FileID's and macro expansions.
//===----------------------------------------------------------------------===//

/// createFileID - Create a new FileID for the specified ContentCache and
/// include position. This works regardless of whether the ContentCache
/// corresponds to a file or some other input source.
FileID SourceManager::createFileID(const ContentCache *File,
                                   SourceLocation IncludePos,
                                   SrcMgr::CharacteristicKind FileCharacter,
                                   int LoadedID, unsigned LoadedOffset) {
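  // A negative LoadedID means the caller is filling in a slot that was
  // pre-allocated (by AllocateLoadedSLocEntries) for an entry coming from an
  // AST/PCH file; otherwise append a fresh local entry.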
  if (LoadedID < 0) {
    assert(LoadedID != -1 && "Loading sentinel FileID");
    unsigned Index = unsigned(-LoadedID) - 2;
    assert(Index < LoadedSLocEntryTable.size() && "FileID out of range");
    assert(!SLocEntryLoaded[Index] && "FileID already loaded");
    LoadedSLocEntryTable[Index] = SLocEntry::get(LoadedOffset,
        FileInfo::get(IncludePos, File, FileCharacter));
    SLocEntryLoaded[Index] = true;
    return FileID::get(LoadedID);
  }
  LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset,
                                               FileInfo::get(IncludePos, File,
                                                             FileCharacter)));
  unsigned FileSize = File->getSize();
  assert(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
         NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset &&
         "Ran out of source locations!");
  // We do a +1 here because we want a SourceLocation that means "the end of the
  // file", e.g. for the "no newline at the end of the file" diagnostic.
  NextLocalOffset += FileSize + 1;

  // Set LastFileIDLookup to the newly created file. The next getFileID call is
  // almost guaranteed to be from that file.
  FileID FID = FileID::get(LocalSLocEntryTable.size()-1);
  return LastFileIDLookup = FID;
}

SourceLocation
SourceManager::createMacroArgExpansionLoc(SourceLocation SpellingLoc,
                                          SourceLocation ExpansionLoc,
                                          unsigned TokLength) {
  ExpansionInfo Info = ExpansionInfo::createForMacroArg(SpellingLoc,
                                                        ExpansionLoc);
  return createExpansionLocImpl(Info, TokLength);
}

SourceLocation
SourceManager::createExpansionLoc(SourceLocation SpellingLoc,
                                  SourceLocation ExpansionLocStart,
                                  SourceLocation ExpansionLocEnd,
                                  unsigned TokLength,
                                  int LoadedID,
                                  unsigned LoadedOffset) {
  ExpansionInfo Info = ExpansionInfo::create(SpellingLoc, ExpansionLocStart,
                                             ExpansionLocEnd);
  return createExpansionLocImpl(Info, TokLength, LoadedID, LoadedOffset);
}

SourceLocation
SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
|
2011-07-26 11:03:05 +08:00
|
|
|
unsigned TokLength,
|
|
|
|
int LoadedID,
|
|
|
|
unsigned LoadedOffset) {
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
llvm-svn: 135484
2011-07-20 00:10:42 +08:00
|
|
|
if (LoadedID < 0) {
|
|
|
|
assert(LoadedID != -1 && "Loading sentinel FileID");
|
|
|
|
unsigned Index = unsigned(-LoadedID) - 2;
|
|
|
|
assert(Index < LoadedSLocEntryTable.size() && "FileID out of range");
|
|
|
|
assert(!SLocEntryLoaded[Index] && "FileID already loaded");
|
2011-07-26 12:41:47 +08:00
|
|
|
LoadedSLocEntryTable[Index] = SLocEntry::get(LoadedOffset, Info);
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
llvm-svn: 135484
2011-07-20 00:10:42 +08:00
|
|
|
SLocEntryLoaded[Index] = true;
|
|
|
|
return SourceLocation::getMacroLoc(LoadedOffset);
|
2009-04-27 14:38:32 +08:00
|
|
|
}
|
2011-07-26 12:41:47 +08:00
|
|
|
LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset, Info));
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
llvm-svn: 135484
2011-07-20 00:10:42 +08:00
|
|
|
assert(NextLocalOffset + TokLength + 1 > NextLocalOffset &&
|
|
|
|
NextLocalOffset + TokLength + 1 <= CurrentLoadedOffset &&
|
|
|
|
"Ran out of source locations!");
|
|
|
|
// See createFileID for that +1.
|
|
|
|
NextLocalOffset += TokLength + 1;
|
|
|
|
return SourceLocation::getMacroLoc(NextLocalOffset - (TokLength + 1));
|
2006-06-30 14:10:08 +08:00
|
|
|
}
|
|
|
|
|
2014-06-28 01:40:03 +08:00
|
|
|
llvm::MemoryBuffer *SourceManager::getMemoryBufferForFile(const FileEntry *File,
|
|
|
|
bool *Invalid) {
|
2009-12-02 14:49:09 +08:00
|
|
|
const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
|
2010-03-16 06:54:52 +08:00
|
|
|
assert(IR && "getOrCreateContentCache() cannot return NULL");
|
2010-04-21 04:35:58 +08:00
|
|
|
return IR->getBuffer(Diag, *this, SourceLocation(), Invalid);
|
2009-12-02 14:49:09 +08:00
|
|
|
}
|
|
|
|
|
2010-10-27 04:47:28 +08:00
|
|
|
void SourceManager::overrideFileContents(const FileEntry *SourceFile,
|
2014-06-28 01:40:03 +08:00
|
|
|
llvm::MemoryBuffer *Buffer,
|
Introduce basic support for loading a precompiled preamble while
reparsing an ASTUnit. When saving a preamble, create a buffer larger
than the actual file we're working with but fill everything from the
end of the preamble to the end of the file with spaces (so the lexer
will quickly skip them). When we load the file, create a buffer of the
same size, filling it with the file and then spaces. Then, instruct
the lexer to start lexing after the preamble, therefore continuing the
parse from the spot where the preamble left off.
It's now possible to perform a simple preamble build + parse (+
reparse) with ASTUnit. However, one has to disable a bunch of checking
in the PCH reader to do so. That part isn't committed; it will likely
be handled with some other kind of flag (e.g., -fno-validate-pch).
As part of this, fix some issues with null termination of the memory
buffers created for the preamble; we were trying to explicitly
NULL-terminate them, even though they were also getting implicitly
NULL terminated, leading to excess warnings about NULL characters in
source files.
llvm-svn: 109445
2010-07-27 05:36:20 +08:00
|
|
|
bool DoNotFree) {
|
2009-12-02 14:49:09 +08:00
|
|
|
const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
|
2010-10-27 04:47:28 +08:00
|
|
|
assert(IR && "getOrCreateContentCache() cannot return NULL");
|
2009-12-02 14:49:09 +08:00
|
|
|
|
Introduce basic support for loading a precompiled preamble while
reparsing an ASTUnit. When saving a preamble, create a buffer larger
than the actual file we're working with but fill everything from the
end of the preamble to the end of the file with spaces (so the lexer
will quickly skip them). When we load the file, create a buffer of the
same size, filling it with the file and then spaces. Then, instruct
the lexer to start lexing after the preamble, therefore continuing the
parse from the spot where the preamble left off.
It's now possible to perform a simple preamble build + parse (+
reparse) with ASTUnit. However, one has to disable a bunch of checking
in the PCH reader to do so. That part isn't committed; it will likely
be handled with some other kind of flag (e.g., -fno-validate-pch).
As part of this, fix some issues with null termination of the memory
buffers created for the preamble; we were trying to explicitly
NULL-terminate them, even though they were also getting implicitly
NULL terminated, leading to excess warnings about NULL characters in
source files.
llvm-svn: 109445
2010-07-27 05:36:20 +08:00
|
|
|
const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(Buffer, DoNotFree);
|
2011-11-17 04:05:18 +08:00
|
|
|
const_cast<SrcMgr::ContentCache *>(IR)->BufferOverridden = true;
|
2012-05-04 05:50:39 +08:00
|
|
|
|
|
|
|
getOverriddenFilesInfo().OverriddenFilesWithBuffer.insert(SourceFile);
|
2009-12-02 14:49:09 +08:00
|
|
|
}

void SourceManager::overrideFileContents(const FileEntry *SourceFile,
                                         const FileEntry *NewFile) {
  assert(SourceFile->getSize() == NewFile->getSize() &&
         "Different sizes, use the FileManager to create a virtual file with "
         "the correct size");
  assert(FileInfos.count(SourceFile) == 0 &&
         "This function should be called at the initialization stage, before "
         "any parsing occurs.");
  getOverriddenFilesInfo().OverriddenFiles[SourceFile] = NewFile;
}

void SourceManager::disableFileContentsOverride(const FileEntry *File) {
  if (!isFileOverridden(File))
    return;

  const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
  const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(nullptr);
  const_cast<SrcMgr::ContentCache *>(IR)->ContentsEntry = IR->OrigEntry;

  assert(OverriddenFilesInfo);
  OverriddenFilesInfo->OverriddenFiles.erase(File);
  OverriddenFilesInfo->OverriddenFilesWithBuffer.erase(File);
}

void SourceManager::setFileIsTransient(const FileEntry *File) {
  const SrcMgr::ContentCache *CC = getOrCreateContentCache(File);
  const_cast<SrcMgr::ContentCache *>(CC)->IsTransient = true;
}

StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
  bool MyInvalid = false;
  const SLocEntry &SLoc = getSLocEntry(FID, &MyInvalid);
  if (!SLoc.isFile() || MyInvalid) {
    if (Invalid)
      *Invalid = true;
    return "<<<<<INVALID SOURCE LOCATION>>>>>";
  }

  llvm::MemoryBuffer *Buf = SLoc.getFile().getContentCache()->getBuffer(
      Diag, *this, SourceLocation(), &MyInvalid);
  if (Invalid)
    *Invalid = MyInvalid;
  if (MyInvalid)
    return "<<<<<INVALID SOURCE LOCATION>>>>>";

  return Buf->getBuffer();
}
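
// Illustrative usage sketch (not part of this file): getBufferData is the
// usual way to read the text of a file already managed by this SourceManager:
//   bool Invalid = false;
//   StringRef Text = SM.getBufferData(SM.getMainFileID(), &Invalid);
//   if (!Invalid)
//     llvm::outs() << Text.substr(0, 80) << "\n";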

//===----------------------------------------------------------------------===//
// SourceLocation manipulation methods.
//===----------------------------------------------------------------------===//

/// \brief Return the FileID for a SourceLocation.
///
/// This is the cache-miss path of getFileID. Not as hot as that function, but
/// still very important. It is responsible for finding the entry in the
/// SLocEntry tables that contains the specified location.
FileID SourceManager::getFileIDSlow(unsigned SLocOffset) const {
  if (!SLocOffset)
    return FileID::get(0);

  // Now it is time to search for the correct file. See where the SLocOffset
  // sits in the global view and consult local or loaded buffers for it.
  if (SLocOffset < NextLocalOffset)
    return getFileIDLocal(SLocOffset);
  return getFileIDLoaded(SLocOffset);
}
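
// A minimal sketch of the offset space assumed above (layout is illustrative,
// not normative): parsed ("local") SLocEntries get offsets that grow up from 0
// toward NextLocalOffset, while entries loaded from AST/PCH files get offsets
// that grow down toward CurrentLoadedOffset:
//     0 .............. NextLocalOffset     CurrentLoadedOffset .......... max
//     [ local entries, growing up )         ( loaded entries, growing down ]
// so a single comparison against NextLocalOffset picks the table to search.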

/// \brief Return the FileID for a SourceLocation with a low offset.
///
/// This function knows that the SourceLocation is in a local buffer, not a
/// loaded one.
FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
  assert(SLocOffset < NextLocalOffset && "Bad function choice");

  // After the first and second level caches, I see two common sorts of
  // behavior: 1) a lot of searched FileID's are "near" the cached file
  // location or are "near" the cached expansion location. 2) others are just
  // completely random and may be a very long way away.
  //
  // To handle this, we do a linear search for up to 8 steps to catch #1
  // quickly, then we fall back to a less cache efficient, but more scalable,
  // binary search to find the location.

  // See if this is near the file point - worst case we start scanning from the
  // most newly created FileID.
  const SrcMgr::SLocEntry *I;

  if (LastFileIDLookup.ID < 0 ||
      LocalSLocEntryTable[LastFileIDLookup.ID].getOffset() < SLocOffset) {
    // Neither loc prunes our search.
    I = LocalSLocEntryTable.end();
  } else {
    // Perhaps it is near the file point.
    I = LocalSLocEntryTable.begin()+LastFileIDLookup.ID;
  }

  // Find the FileID that contains this. "I" is an iterator that points to a
  // FileID whose offset is known to be larger than SLocOffset.
  unsigned NumProbes = 0;
  while (1) {
    --I;
    if (I->getOffset() <= SLocOffset) {
      FileID Res = FileID::get(int(I - LocalSLocEntryTable.begin()));

      // If this isn't an expansion, remember it. We have good locality across
      // FileID lookups.
      if (!I->isExpansion())
        LastFileIDLookup = Res;
      NumLinearScans += NumProbes+1;
      return Res;
    }
    if (++NumProbes == 8)
      break;
  }

  // Convert "I" back into an index. We know that it is an entry whose index is
  // larger than the offset we are looking for.
  unsigned GreaterIndex = I - LocalSLocEntryTable.begin();
  // LessIndex - This is the lower bound of the range that we're searching.
  // We know that the offset corresponding to the FileID is less than
  // SLocOffset.
  unsigned LessIndex = 0;
  NumProbes = 0;
  while (1) {
    bool Invalid = false;
    unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
    unsigned MidOffset = getLocalSLocEntry(MiddleIndex, &Invalid).getOffset();
    if (Invalid)
      return FileID::get(0);

    ++NumProbes;

    // If the offset of the midpoint is too large, chop the high side of the
    // range to the midpoint.
    if (MidOffset > SLocOffset) {
      GreaterIndex = MiddleIndex;
      continue;
    }

    // If the middle index contains the value, succeed and return.
    // FIXME: This could be made faster by using a function that's aware of
    // being in the local area.
    if (isOffsetInFileID(FileID::get(MiddleIndex), SLocOffset)) {
      FileID Res = FileID::get(MiddleIndex);

      // If this isn't a macro expansion, remember it. We have good locality
      // across FileID lookups.
      if (!LocalSLocEntryTable[MiddleIndex].isExpansion())
        LastFileIDLookup = Res;
      NumBinaryProbes += NumProbes;
      return Res;
    }

    // Otherwise, move the low-side up to the middle index.
    LessIndex = MiddleIndex;
  }
}
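
// Worked example of the search above (offsets are illustrative only): with
// local entry offsets {0, 100, 250, 400} and SLocOffset == 260, the linear
// scan walks backward from the hint (or from the end) and stops at the entry
// with offset 250, i.e. FileID 2; if the hint is more than 8 entries away,
// the binary search between LessIndex and GreaterIndex finds the same entry.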

/// \brief Return the FileID for a SourceLocation with a high offset.
///
/// This function knows that the SourceLocation is in a loaded buffer, not a
/// local one.
FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
  // Sanity checking, otherwise a bug may lead to hanging in release build.
  if (SLocOffset < CurrentLoadedOffset) {
    assert(0 && "Invalid SLocOffset or bad function choice");
    return FileID();
  }

  // Essentially the same as the local case, but the loaded array is sorted
  // in the other direction.

  // First do a linear scan from the last lookup position, if possible.
  unsigned I;
  int LastID = LastFileIDLookup.ID;
  if (LastID >= 0 || getLoadedSLocEntryByID(LastID).getOffset() < SLocOffset)
    I = 0;
  else
    I = (-LastID - 2) + 1;

  unsigned NumProbes;
  for (NumProbes = 0; NumProbes < 8; ++NumProbes, ++I) {
    // Make sure the entry is loaded!
    const SrcMgr::SLocEntry &E = getLoadedSLocEntry(I);
    if (E.getOffset() <= SLocOffset) {
      FileID Res = FileID::get(-int(I) - 2);

      if (!E.isExpansion())
        LastFileIDLookup = Res;
      NumLinearScans += NumProbes + 1;
      return Res;
    }
  }

  // Linear scan failed. Do the binary search. Note the reverse sorting of the
  // table: GreaterIndex is the one where the offset is greater, which is
  // actually a lower index!
  unsigned GreaterIndex = I;
  unsigned LessIndex = LoadedSLocEntryTable.size();
  NumProbes = 0;
  while (1) {
    ++NumProbes;
    unsigned MiddleIndex = (LessIndex - GreaterIndex) / 2 + GreaterIndex;
    const SrcMgr::SLocEntry &E = getLoadedSLocEntry(MiddleIndex);
    if (E.getOffset() == 0)
      return FileID(); // invalid entry.

    ++NumProbes;

    if (E.getOffset() > SLocOffset) {
      // Sanity checking, otherwise a bug may lead to hanging in release build.
      if (GreaterIndex == MiddleIndex) {
        assert(0 && "binary search missed the entry");
        return FileID();
      }
      GreaterIndex = MiddleIndex;
      continue;
    }

    if (isOffsetInFileID(FileID::get(-int(MiddleIndex) - 2), SLocOffset)) {
      FileID Res = FileID::get(-int(MiddleIndex) - 2);
      if (!E.isExpansion())
        LastFileIDLookup = Res;
      NumBinaryProbes += NumProbes;
      return Res;
    }

    // Sanity checking, otherwise a bug may lead to hanging in release build.
    if (LessIndex == MiddleIndex) {
      assert(0 && "binary search missed the entry");
      return FileID();
    }
    LessIndex = MiddleIndex;
  }
}
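
// Note on the mapping used above: loaded entries live at negative FileIDs,
// with LoadedSLocEntryTable index I corresponding to FileID -I - 2 (the value
// -1 is not used for loaded entries). For example (illustrative), index 0 is
// FileID -2 and index 3 is FileID -5.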

SourceLocation SourceManager::
getExpansionLocSlowCase(SourceLocation Loc) const {
  do {
    // Note: If Loc indicates an offset into a token that came from a macro
    // expansion (e.g. the 5th character of the token) we do not want to add
    // this offset when going to the expansion location. The expansion
    // location is the macro invocation, which the offset has nothing to do
    // with. This is unlike when we get the spelling loc, because the offset
    // directly corresponds to the token whose spelling we're inspecting.
    Loc = getSLocEntry(getFileID(Loc)).getExpansion().getExpansionLocStart();
  } while (!Loc.isFileID());

  return Loc;
}

SourceLocation SourceManager::getSpellingLocSlowCase(SourceLocation Loc) const {
  do {
    std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
    Loc = getSLocEntry(LocInfo.first).getExpansion().getSpellingLoc();
    Loc = Loc.getLocWithOffset(LocInfo.second);
  } while (!Loc.isFileID());
  return Loc;
}

SourceLocation SourceManager::getFileLocSlowCase(SourceLocation Loc) const {
  do {
    if (isMacroArgExpansion(Loc))
      Loc = getImmediateSpellingLoc(Loc);
    else
      Loc = getImmediateExpansionRange(Loc).first;
  } while (!Loc.isFileID());
  return Loc;
}

std::pair<FileID, unsigned>
SourceManager::getDecomposedExpansionLocSlowCase(
                                             const SrcMgr::SLocEntry *E) const {
  // If this is an expansion record, walk through all the expansion points.
  FileID FID;
  SourceLocation Loc;
  unsigned Offset;
  do {
    Loc = E->getExpansion().getExpansionLocStart();

    FID = getFileID(Loc);
    E = &getSLocEntry(FID);
    Offset = Loc.getOffset()-E->getOffset();
  } while (!Loc.isFileID());

  return std::make_pair(FID, Offset);
}

std::pair<FileID, unsigned>
SourceManager::getDecomposedSpellingLocSlowCase(const SrcMgr::SLocEntry *E,
                                                unsigned Offset) const {
  // If this is an expansion record, walk through all the expansion points.
  FileID FID;
  SourceLocation Loc;
  do {
    Loc = E->getExpansion().getSpellingLoc();
    Loc = Loc.getLocWithOffset(Offset);

    FID = getFileID(Loc);
    E = &getSLocEntry(FID);
    Offset = Loc.getOffset()-E->getOffset();
  } while (!Loc.isFileID());

  return std::make_pair(FID, Offset);
}

/// getImmediateSpellingLoc - Given a SourceLocation object, return the
/// spelling location referenced by the ID. This is the first level down
/// towards the place where the characters that make up the lexed token can be
/// found. This should not generally be used by clients.
SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const {
  if (Loc.isFileID()) return Loc;
  std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
  Loc = getSLocEntry(LocInfo.first).getExpansion().getSpellingLoc();
  return Loc.getLocWithOffset(LocInfo.second);
}

/// getImmediateExpansionRange - Loc is required to be an expansion location.
/// Return the start/end of the expansion information.
std::pair<SourceLocation,SourceLocation>
SourceManager::getImmediateExpansionRange(SourceLocation Loc) const {
  assert(Loc.isMacroID() && "Not a macro expansion loc!");
  const ExpansionInfo &Expansion = getSLocEntry(getFileID(Loc)).getExpansion();
  return Expansion.getExpansionLocRange();
}

/// getExpansionRange - Given a SourceLocation object, return the range of
/// tokens covered by the expansion in the ultimate file.
std::pair<SourceLocation,SourceLocation>
SourceManager::getExpansionRange(SourceLocation Loc) const {
  if (Loc.isFileID()) return std::make_pair(Loc, Loc);

  std::pair<SourceLocation,SourceLocation> Res =
    getImmediateExpansionRange(Loc);

  // Fully resolve the start and end locations to their ultimate expansion
  // points.
  while (!Res.first.isFileID())
    Res.first = getImmediateExpansionRange(Res.first).first;
  while (!Res.second.isFileID())
    Res.second = getImmediateExpansionRange(Res.second).second;
  return Res;
}
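
// Illustrative example (not part of this file): given
//   #define ADD(a, b) ((a) + (b))
//   int x = ADD(1, 2);
// a location inside the expanded "((1) + (2))" maps back, via the loop above,
// to the range of the "ADD(1, 2)" tokens as written in the ultimate file.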

bool SourceManager::isMacroArgExpansion(SourceLocation Loc,
                                        SourceLocation *StartLoc) const {
  if (!Loc.isMacroID()) return false;

  FileID FID = getFileID(Loc);
  const SrcMgr::ExpansionInfo &Expansion = getSLocEntry(FID).getExpansion();
  if (!Expansion.isMacroArgExpansion()) return false;

  if (StartLoc)
    *StartLoc = Expansion.getExpansionLocStart();
  return true;
}
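
// Illustrative example (not part of this file): with
//   #define ID(x) x
//   ID(foo);
// the location of "foo" inside the expansion is a macro *argument* expansion,
// so this predicate returns true and reports the expansion start; locations
// produced from the macro body itself are classified by the next function.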

bool SourceManager::isMacroBodyExpansion(SourceLocation Loc) const {
  if (!Loc.isMacroID()) return false;

  FileID FID = getFileID(Loc);
  const SrcMgr::ExpansionInfo &Expansion = getSLocEntry(FID).getExpansion();
  return Expansion.isMacroBodyExpansion();
}

bool SourceManager::isAtStartOfImmediateMacroExpansion(SourceLocation Loc,
                                             SourceLocation *MacroBegin) const {
  assert(Loc.isValid() && Loc.isMacroID() && "Expected a valid macro loc");

  std::pair<FileID, unsigned> DecompLoc = getDecomposedLoc(Loc);
  if (DecompLoc.second > 0)
    return false; // Does not point at the start of expansion range.

  bool Invalid = false;
  const SrcMgr::ExpansionInfo &ExpInfo =
      getSLocEntry(DecompLoc.first, &Invalid).getExpansion();
  if (Invalid)
    return false;
  SourceLocation ExpLoc = ExpInfo.getExpansionLocStart();

  if (ExpInfo.isMacroArgExpansion()) {
    // For macro argument expansions, check if the previous FileID is part of
    // the same argument expansion, in which case this Loc is not at the
    // beginning of the expansion.
    FileID PrevFID = getPreviousFileID(DecompLoc.first);
    if (!PrevFID.isInvalid()) {
      const SrcMgr::SLocEntry &PrevEntry = getSLocEntry(PrevFID, &Invalid);
      if (Invalid)
        return false;
      if (PrevEntry.isExpansion() &&
          PrevEntry.getExpansion().getExpansionLocStart() == ExpLoc)
        return false;
    }
  }

  if (MacroBegin)
    *MacroBegin = ExpLoc;
  return true;
}

bool SourceManager::isAtEndOfImmediateMacroExpansion(SourceLocation Loc,
                                               SourceLocation *MacroEnd) const {
  assert(Loc.isValid() && Loc.isMacroID() && "Expected a valid macro loc");

  FileID FID = getFileID(Loc);
  SourceLocation NextLoc = Loc.getLocWithOffset(1);
  if (isInFileID(NextLoc, FID))
    return false; // Does not point at the end of expansion range.

  bool Invalid = false;
  const SrcMgr::ExpansionInfo &ExpInfo =
      getSLocEntry(FID, &Invalid).getExpansion();
  if (Invalid)
    return false;

  if (ExpInfo.isMacroArgExpansion()) {
    // For macro argument expansions, check if the next FileID is part of the
    // same argument expansion, in which case this Loc is not at the end of the
    // expansion.
    FileID NextFID = getNextFileID(FID);
    if (!NextFID.isInvalid()) {
      const SrcMgr::SLocEntry &NextEntry = getSLocEntry(NextFID, &Invalid);
      if (Invalid)
        return false;
      if (NextEntry.isExpansion() &&
          NextEntry.getExpansion().getExpansionLocStart() ==
              ExpInfo.getExpansionLocStart())
        return false;
    }
  }

  if (MacroEnd)
    *MacroEnd = ExpInfo.getExpansionLocEnd();
  return true;
}


//===----------------------------------------------------------------------===//
// Queries about the code at a SourceLocation.
//===----------------------------------------------------------------------===//

/// getCharacterData - Return a pointer to the start of the specified location
/// in the appropriate MemoryBuffer.
const char *SourceManager::getCharacterData(SourceLocation SL,
                                            bool *Invalid) const {
  // Note that this is a hot function in the getSpelling() path, which is
  // heavily used by -E mode.
  std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(SL);

  // Note that calling 'getBuffer()' may lazily page in a source file.
  bool CharDataInvalid = false;
  const SLocEntry &Entry = getSLocEntry(LocInfo.first, &CharDataInvalid);
  if (CharDataInvalid || !Entry.isFile()) {
    if (Invalid)
      *Invalid = true;

    return "<<<<INVALID BUFFER>>>>";
  }
  llvm::MemoryBuffer *Buffer = Entry.getFile().getContentCache()->getBuffer(
      Diag, *this, SourceLocation(), &CharDataInvalid);
  if (Invalid)
    *Invalid = CharDataInvalid;
  return Buffer->getBufferStart() + (CharDataInvalid? 0 : LocInfo.second);
}
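
// Illustrative usage sketch (not part of this file): callers typically pair
// this with a token length obtained from the lexer, e.g.
//   const char *Start = SM.getCharacterData(Tok.getLocation());
//   StringRef Spelling(Start, Tok.getLength());
// where 'Tok' is assumed to be a clang::Token produced elsewhere.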


/// getColumnNumber - Return the column # for the specified file position.
/// This is significantly cheaper to compute than the line number.
unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
                                        bool *Invalid) const {
  bool MyInvalid = false;
  llvm::MemoryBuffer *MemBuf = getBuffer(FID, &MyInvalid);
  if (Invalid)
    *Invalid = MyInvalid;

  if (MyInvalid)
    return 1;

  // It is okay to request a position just past the end of the buffer.
  if (FilePos > MemBuf->getBufferSize()) {
    if (Invalid)
      *Invalid = true;
    return 1;
  }

  // See if we just calculated the line number for this FilePos and can use
  // that to lookup the start of the line instead of searching for it.
  if (LastLineNoFileIDQuery == FID &&
      LastLineNoContentCache->SourceLineCache != nullptr &&
      LastLineNoResult < LastLineNoContentCache->NumLines) {
    unsigned *SourceLineCache = LastLineNoContentCache->SourceLineCache;
    unsigned LineStart = SourceLineCache[LastLineNoResult - 1];
    unsigned LineEnd = SourceLineCache[LastLineNoResult];
    if (FilePos >= LineStart && FilePos < LineEnd)
      return FilePos - LineStart + 1;
  }

  const char *Buf = MemBuf->getBufferStart();
  unsigned LineStart = FilePos;
  while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
    --LineStart;
  return FilePos-LineStart+1;
}
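
// Worked example (illustrative): if the enclosing line starts at file offset
// 50 and FilePos is 57, the function returns 57 - 50 + 1 == 8; columns are
// 1-based counts of bytes from the start of the line, with no tab expansion.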

// isInvalid - Return the result of calling loc.isInvalid(), and
// if Invalid is not null, set its value to same.
static bool isInvalid(SourceLocation Loc, bool *Invalid) {
  bool MyInvalid = Loc.isInvalid();
  if (Invalid)
    *Invalid = MyInvalid;
  return MyInvalid;
}

unsigned SourceManager::getSpellingColumnNumber(SourceLocation Loc,
                                                bool *Invalid) const {
  if (isInvalid(Loc, Invalid)) return 0;
  std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
  return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
}

unsigned SourceManager::getExpansionColumnNumber(SourceLocation Loc,
                                                 bool *Invalid) const {
  if (isInvalid(Loc, Invalid)) return 0;
  std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
  return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
}

unsigned SourceManager::getPresumedColumnNumber(SourceLocation Loc,
                                                bool *Invalid) const {
  if (isInvalid(Loc, Invalid)) return 0;
  return getPresumedLoc(Loc).getColumn();
}

#ifdef __SSE2__
#include <emmintrin.h>
#endif

static LLVM_ATTRIBUTE_NOINLINE void
ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
                   llvm::BumpPtrAllocator &Alloc,
                   const SourceManager &SM, bool &Invalid);
static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
                               llvm::BumpPtrAllocator &Alloc,
                               const SourceManager &SM, bool &Invalid) {
  // Note that calling 'getBuffer()' may lazily page in the file.
  MemoryBuffer *Buffer = FI->getBuffer(Diag, SM, SourceLocation(), &Invalid);
  if (Invalid)
    return;

  // Find the file offsets of all of the *physical* source lines. This does
  // not look at trigraphs, escaped newlines, or anything else tricky.
  SmallVector<unsigned, 256> LineOffsets;

  // Line #1 starts at char 0.
  LineOffsets.push_back(0);

  const unsigned char *Buf = (const unsigned char *)Buffer->getBufferStart();
  const unsigned char *End = (const unsigned char *)Buffer->getBufferEnd();
  unsigned Offs = 0;
  while (1) {
    // Skip over the contents of the line.
    const unsigned char *NextBuf = (const unsigned char *)Buf;

#ifdef __SSE2__
    // Try to skip to the next newline using SSE instructions. This is very
    // performance sensitive for programs with lots of diagnostics and in -E
    // mode.
    __m128i CRs = _mm_set1_epi8('\r');
    __m128i LFs = _mm_set1_epi8('\n');

    // First fix up the alignment to 16 bytes.
    while (((uintptr_t)NextBuf & 0xF) != 0) {
      if (*NextBuf == '\n' || *NextBuf == '\r' || *NextBuf == '\0')
        goto FoundSpecialChar;
      ++NextBuf;
    }

    // Scan 16 byte chunks for '\r' and '\n'. Ignore '\0'.
    while (NextBuf+16 <= End) {
      const __m128i Chunk = *(const __m128i*)NextBuf;
      __m128i Cmp = _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs),
                                 _mm_cmpeq_epi8(Chunk, LFs));
      unsigned Mask = _mm_movemask_epi8(Cmp);

      // If we found a newline, adjust the pointer and jump to the handling
      // code.
      if (Mask != 0) {
        NextBuf += llvm::countTrailingZeros(Mask);
        goto FoundSpecialChar;
      }
      NextBuf += 16;
    }
#endif

    while (*NextBuf != '\n' && *NextBuf != '\r' && *NextBuf != '\0')
      ++NextBuf;

#ifdef __SSE2__
FoundSpecialChar:
#endif
    Offs += NextBuf-Buf;
    Buf = NextBuf;

    if (Buf[0] == '\n' || Buf[0] == '\r') {
      // If this is \n\r or \r\n, skip both characters.
      if ((Buf[1] == '\n' || Buf[1] == '\r') && Buf[0] != Buf[1])
        ++Offs, ++Buf;
      ++Offs, ++Buf;
      LineOffsets.push_back(Offs);
    } else {
      // Otherwise, this is a null. If end of file, exit.
      if (Buf == End) break;
      // Otherwise, skip the null.
      ++Offs, ++Buf;
    }
  }

  // Copy the offsets into the FileInfo structure.
  FI->NumLines = LineOffsets.size();
  FI->SourceLineCache = Alloc.Allocate<unsigned>(LineOffsets.size());
  std::copy(LineOffsets.begin(), LineOffsets.end(), FI->SourceLineCache);
}
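
// Worked example (illustrative): for a buffer containing "ab\ncd\r\nef" the
// loop above records LineOffsets == {0, 3, 7}: line 1 starts at offset 0,
// line 2 starts after the '\n' at offset 2, and line 3 starts after the
// "\r\n" pair, which is consumed as a single line terminator.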
|
2006-06-21 12:57:09 +08:00
|
|
|
|
2009-01-16 15:00:02 +08:00
|
|
|
/// getLineNumber - Given a SourceLocation, return the spelling line number
|
2006-06-18 13:43:12 +08:00
|
|
|
/// for the position indicated. This requires building and caching a table of
|
2007-04-29 15:12:06 +08:00
|
|
|
/// line offsets for the MemoryBuffer, so this is not cheap: use only when
|
2006-06-18 13:43:12 +08:00
|
|
|
/// about to emit a diagnostic.
|
2010-03-16 13:20:39 +08:00
|
|
|
unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
|
|
|
|
bool *Invalid) const {
|
2011-05-18 06:09:53 +08:00
|
|
|
if (FID.isInvalid()) {
|
|
|
|
if (Invalid)
|
|
|
|
*Invalid = true;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2009-01-26 08:43:02 +08:00
|
|
|
ContentCache *Content;
|
2009-02-04 09:06:56 +08:00
|
|
|
if (LastLineNoFileIDQuery == FID)
|
2007-10-31 05:08:08 +08:00
|
|
|
Content = LastLineNoContentCache;
|
2011-04-20 08:21:03 +08:00
|
|
|
else {
|
|
|
|
bool MyInvalid = false;
|
|
|
|
const SLocEntry &Entry = getSLocEntry(FID, &MyInvalid);
|
|
|
|
if (MyInvalid || !Entry.isFile()) {
|
|
|
|
if (Invalid)
|
|
|
|
*Invalid = true;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
Content = const_cast<ContentCache*>(Entry.getFile().getContentCache());
|
|
|
|
}
|
|
|
|
|
2006-06-18 13:43:12 +08:00
|
|
|
// If this is the first use of line information for this buffer, compute the
|
2007-07-24 13:57:19 +08:00
|
|
|
/// SourceLineCache for it on demand.
|
2014-05-08 14:41:40 +08:00
|
|
|
if (!Content->SourceLineCache) {
|
2010-03-16 13:20:39 +08:00
|
|
|
bool MyInvalid = false;
|
2010-04-21 04:35:58 +08:00
|
|
|
ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
|
2010-03-16 13:20:39 +08:00
|
|
|
if (Invalid)
|
|
|
|
*Invalid = MyInvalid;
|
|
|
|
if (MyInvalid)
|
|
|
|
return 1;
|
|
|
|
} else if (Invalid)
|
|
|
|
*Invalid = false;
|
2006-06-18 13:43:12 +08:00
|
|
|
|
|
|
|
// Okay, we know we have a line number table. Do a binary search to find the
|
|
|
|
// line number that this character position lands on.
|
2007-10-31 05:08:08 +08:00
|
|
|
unsigned *SourceLineCache = Content->SourceLineCache;
|
2007-07-24 13:57:19 +08:00
|
|
|
unsigned *SourceLineCacheStart = SourceLineCache;
|
2007-10-31 05:08:08 +08:00
|
|
|
unsigned *SourceLineCacheEnd = SourceLineCache + Content->NumLines;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 09:06:56 +08:00
|
|
|
unsigned QueriedFilePos = FilePos+1;
|
2007-07-24 13:57:19 +08:00
|
|
|
|
2009-05-19 01:30:52 +08:00
|
|
|
// FIXME: I would like to be convinced that this code is worth being as
|
2009-09-09 23:08:12 +08:00
|
|
|
// complicated as it is; binary search isn't that slow.
|
2009-05-19 01:30:52 +08:00
|
|
|
//
|
|
|
|
// If it is worth being optimized, then in my opinion it could be more
|
|
|
|
// performant, simpler, and more obviously correct by just "galloping" outward
|
|
|
|
// from the queried file position. In fact, this could be incorporated into a
|
|
|
|
// generic algorithm such as lower_bound_with_hint.
|
|
|
|
//
|
|
|
|
// If someone gives me a test case where this matters, I will do it! - DWD
|
|
|
|
|
2007-07-24 13:57:19 +08:00
|
|
|
// If the previous query was to the same file, we know both the file pos from
|
|
|
|
// that query and the line number returned. This allows us to narrow the
|
|
|
|
// search space from the entire file to something near the match.
|
2009-02-04 09:06:56 +08:00
|
|
|
if (LastLineNoFileIDQuery == FID) {
|
2007-07-24 13:57:19 +08:00
|
|
|
if (QueriedFilePos >= LastLineNoFilePos) {
|
2009-05-19 01:30:52 +08:00
|
|
|
// FIXME: Potential overflow?
|
2007-07-24 13:57:19 +08:00
|
|
|
SourceLineCache = SourceLineCache+LastLineNoResult-1;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2007-07-24 13:57:19 +08:00
|
|
|
// The query is likely to be nearby the previous one. Here we check to
|
|
|
|
// see if it is within 5, 10 or 20 lines. It can be far away in cases
|
|
|
|
// where big comment blocks and vertical whitespace eat up lines but
|
|
|
|
// contribute no tokens.
|
|
|
|
if (SourceLineCache+5 < SourceLineCacheEnd) {
|
|
|
|
if (SourceLineCache[5] > QueriedFilePos)
|
|
|
|
SourceLineCacheEnd = SourceLineCache+5;
|
|
|
|
else if (SourceLineCache+10 < SourceLineCacheEnd) {
|
|
|
|
if (SourceLineCache[10] > QueriedFilePos)
|
|
|
|
SourceLineCacheEnd = SourceLineCache+10;
|
|
|
|
else if (SourceLineCache+20 < SourceLineCacheEnd) {
|
|
|
|
if (SourceLineCache[20] > QueriedFilePos)
|
|
|
|
SourceLineCacheEnd = SourceLineCache+20;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2009-05-19 01:30:52 +08:00
|
|
|
if (LastLineNoResult < Content->NumLines)
|
|
|
|
SourceLineCacheEnd = SourceLineCache+LastLineNoResult+1;
|
2007-07-24 13:57:19 +08:00
|
|
|
}
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2007-07-24 14:43:46 +08:00
|
|
|
unsigned *Pos
|
|
|
|
= std::lower_bound(SourceLineCache, SourceLineCacheEnd, QueriedFilePos);
|
2007-07-24 13:57:19 +08:00
|
|
|
unsigned LineNo = Pos-SourceLineCacheStart;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 09:06:56 +08:00
|
|
|
LastLineNoFileIDQuery = FID;
|
2007-10-31 05:08:08 +08:00
|
|
|
LastLineNoContentCache = Content;
|
2007-07-24 13:57:19 +08:00
|
|
|
LastLineNoFilePos = QueriedFilePos;
|
|
|
|
LastLineNoResult = LineNo;
|
|
|
|
return LineNo;
|
2006-06-18 13:43:12 +08:00
|
|
|
}
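// Usage sketch (illustrative, not part of this file): given a SourceManager
// &SM and a valid SourceLocation Loc, the typical pattern is to decompose the
// location and then ask for the line of the raw offset:
//
//   std::pair<FileID, unsigned> LocInfo = SM.getDecomposedSpellingLoc(Loc);
//   bool Invalid = false;
//   unsigned Line = SM.getLineNumber(LocInfo.first, LocInfo.second, &Invalid);
//   // 'Line' is 1-based; check 'Invalid' before trusting the result.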
|
|
|
|
|
2011-02-23 08:47:48 +08:00
|
|
|
unsigned SourceManager::getSpellingLineNumber(SourceLocation Loc,
|
|
|
|
bool *Invalid) const {
|
|
|
|
if (isInvalid(Loc, Invalid)) return 0;
|
|
|
|
std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
|
|
|
|
return getLineNumber(LocInfo.first, LocInfo.second);
|
|
|
|
}
|
2011-07-26 05:09:52 +08:00
|
|
|
unsigned SourceManager::getExpansionLineNumber(SourceLocation Loc,
|
|
|
|
bool *Invalid) const {
|
2010-10-06 01:56:33 +08:00
|
|
|
if (isInvalid(Loc, Invalid)) return 0;
|
2011-07-26 04:52:32 +08:00
|
|
|
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
|
2009-02-04 09:06:56 +08:00
|
|
|
return getLineNumber(LocInfo.first, LocInfo.second);
|
|
|
|
}
|
2011-02-23 08:47:48 +08:00
|
|
|
unsigned SourceManager::getPresumedLineNumber(SourceLocation Loc,
|
2010-03-16 13:20:39 +08:00
|
|
|
bool *Invalid) const {
|
2010-10-06 01:56:33 +08:00
|
|
|
if (isInvalid(Loc, Invalid)) return 0;
|
2011-02-23 08:47:48 +08:00
|
|
|
return getPresumedLoc(Loc).getLine();
|
2009-02-04 09:06:56 +08:00
|
|
|
}
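// Illustrative contrast of the three wrappers above (sketch, not part of this
// file; assumes a SourceManager &SM and a location Loc inside a macro
// expansion):
//
//   unsigned SpellLine = SM.getSpellingLineNumber(Loc);  // where the text was written
//   unsigned ExpLine   = SM.getExpansionLineNumber(Loc); // where the macro was expanded
//   unsigned PresLine  = SM.getPresumedLineNumber(Loc);  // like ExpLine, but honors #line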
|
|
|
|
|
2009-02-04 13:33:01 +08:00
|
|
|
/// getFileCharacteristic - return the file characteristic of the specified
|
2009-09-09 23:08:12 +08:00
|
|
|
/// source location, indicating whether this is a normal file, a system
|
2009-02-04 13:33:01 +08:00
|
|
|
/// header, or an "implicit extern C" system header.
|
|
|
|
///
|
|
|
|
/// This state can be modified with flags on GNU linemarker directives like:
|
|
|
|
/// # 4 "foo.h" 3
|
|
|
|
/// which changes all source locations in the current file after that to be
|
|
|
|
/// considered to be from a system header.
|
2009-09-09 23:08:12 +08:00
|
|
|
SrcMgr::CharacteristicKind
|
2009-02-04 13:33:01 +08:00
|
|
|
SourceManager::getFileCharacteristic(SourceLocation Loc) const {
|
2015-10-03 18:46:20 +08:00
|
|
|
assert(Loc.isValid() && "Can't get file characteristic of invalid loc!");
|
2011-07-26 04:52:32 +08:00
|
|
|
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
|
2011-04-20 08:21:03 +08:00
|
|
|
bool Invalid = false;
|
|
|
|
const SLocEntry &SEntry = getSLocEntry(LocInfo.first, &Invalid);
|
|
|
|
if (Invalid || !SEntry.isFile())
|
|
|
|
return C_User;
|
|
|
|
|
|
|
|
const SrcMgr::FileInfo &FI = SEntry.getFile();
|
2009-02-04 13:33:01 +08:00
|
|
|
|
|
|
|
// If there are no #line directives in this file, just return the whole-file
|
|
|
|
// state.
|
|
|
|
if (!FI.hasLineDirectives())
|
|
|
|
return FI.getFileCharacteristic();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 13:33:01 +08:00
|
|
|
assert(LineTable && "Can't have linetable entries without a LineTable!");
|
|
|
|
// See if there is a #line directive before the location.
|
|
|
|
const LineEntry *Entry =
|
2012-06-09 00:40:28 +08:00
|
|
|
LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 13:33:01 +08:00
|
|
|
// If this is before the first line marker, use the file characteristic.
|
|
|
|
if (!Entry)
|
|
|
|
return FI.getFileCharacteristic();
|
|
|
|
|
|
|
|
return Entry->FileKind;
|
|
|
|
}
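// Usage sketch (illustrative, not part of this file): a client that wants to
// skip work inside system headers can compare against the enum directly,
// assuming a SourceManager &SM and a valid SourceLocation Loc:
//
//   if (SM.getFileCharacteristic(Loc) != SrcMgr::C_User)
//     return; // system header or implicit extern "C" system header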
|
|
|
|
|
2009-02-17 16:39:06 +08:00
|
|
|
/// Return the filename or buffer identifier of the buffer the location is in.
|
2012-06-17 11:22:59 +08:00
|
|
|
/// Note that this name does not respect \#line directives. Use getPresumedLoc
|
2009-02-17 16:39:06 +08:00
|
|
|
/// for normal clients.
|
2010-03-16 13:20:39 +08:00
|
|
|
const char *SourceManager::getBufferName(SourceLocation Loc,
|
|
|
|
bool *Invalid) const {
|
2010-10-06 01:56:33 +08:00
|
|
|
if (isInvalid(Loc, Invalid)) return "<invalid loc>";
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-03-16 13:20:39 +08:00
|
|
|
return getBuffer(getFileID(Loc), Invalid)->getBufferIdentifier();
|
2009-02-17 16:39:06 +08:00
|
|
|
}
|
|
|
|
|
2009-02-04 09:06:56 +08:00
|
|
|
|
2009-01-27 15:57:44 +08:00
|
|
|
/// getPresumedLoc - This method returns the "presumed" location that a
|
2012-06-17 11:22:59 +08:00
|
|
|
/// SourceLocation specifies. A "presumed location" can be modified by \#line
|
2009-01-27 15:57:44 +08:00
|
|
|
/// or GNU line marker directives. This provides a view on the data that a
|
|
|
|
/// user should see in diagnostics, for example.
|
|
|
|
///
|
2011-07-26 13:17:23 +08:00
|
|
|
/// Note that a presumed location is always given as the expansion point of an
|
|
|
|
/// expansion location, not at the spelling location.
|
2012-11-15 07:55:25 +08:00
|
|
|
PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
|
|
|
|
bool UseLineDirectives) const {
|
2009-01-27 15:57:44 +08:00
|
|
|
if (Loc.isInvalid()) return PresumedLoc();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-07-26 13:17:23 +08:00
|
|
|
// Presumed locations are always for expansion points.
|
2011-07-26 04:52:32 +08:00
|
|
|
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-04-20 08:21:03 +08:00
|
|
|
bool Invalid = false;
|
|
|
|
const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
|
|
|
|
if (Invalid || !Entry.isFile())
|
|
|
|
return PresumedLoc();
|
|
|
|
|
|
|
|
const SrcMgr::FileInfo &FI = Entry.getFile();
|
2009-01-27 15:57:44 +08:00
|
|
|
const SrcMgr::ContentCache *C = FI.getContentCache();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 09:55:42 +08:00
|
|
|
// To get the source name, first consult the FileEntry (if one exists)
|
|
|
|
// before the MemBuffer as this will avoid unnecessarily paging in the
|
|
|
|
// MemBuffer.
|
2010-04-21 04:35:58 +08:00
|
|
|
const char *Filename;
|
2011-03-05 09:03:53 +08:00
|
|
|
if (C->OrigEntry)
|
|
|
|
Filename = C->OrigEntry->getName();
|
2010-04-21 04:35:58 +08:00
|
|
|
else
|
|
|
|
Filename = C->getBuffer(Diag, *this)->getBufferIdentifier();
|
2011-04-20 08:21:03 +08:00
|
|
|
|
2010-11-02 08:39:22 +08:00
|
|
|
unsigned LineNo = getLineNumber(LocInfo.first, LocInfo.second, &Invalid);
|
|
|
|
if (Invalid)
|
|
|
|
return PresumedLoc();
|
|
|
|
unsigned ColNo = getColumnNumber(LocInfo.first, LocInfo.second, &Invalid);
|
|
|
|
if (Invalid)
|
|
|
|
return PresumedLoc();
|
|
|
|
|
2009-02-04 09:55:42 +08:00
|
|
|
SourceLocation IncludeLoc = FI.getIncludeLoc();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 09:55:42 +08:00
|
|
|
// If we have #line directives in this file, update and overwrite the physical
|
|
|
|
// location info if appropriate.
|
2012-11-15 07:55:25 +08:00
|
|
|
if (UseLineDirectives && FI.hasLineDirectives()) {
|
2009-02-04 09:55:42 +08:00
|
|
|
assert(LineTable && "Can't have linetable entries without a LineTable!");
|
|
|
|
// See if there is a #line directive before this. If so, get it.
|
|
|
|
if (const LineEntry *Entry =
|
2012-06-09 00:40:28 +08:00
|
|
|
LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second)) {
|
2009-02-04 10:00:59 +08:00
|
|
|
// If the LineEntry indicates a filename, use it.
|
2009-02-04 09:55:42 +08:00
|
|
|
if (Entry->FilenameID != -1)
|
|
|
|
Filename = LineTable->getFilename(Entry->FilenameID);
|
2009-02-04 10:00:59 +08:00
|
|
|
|
|
|
|
// Use the line number specified by the LineEntry. This line number may
|
|
|
|
// be multiple lines down from the line entry. Add the difference in
|
|
|
|
// physical line numbers from the query point and the line marker to the
|
|
|
|
// total.
|
|
|
|
unsigned MarkerLineNo = getLineNumber(LocInfo.first, Entry->FileOffset);
|
|
|
|
LineNo = Entry->LineNo + (LineNo-MarkerLineNo-1);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 10:15:40 +08:00
|
|
|
// Note that column numbers are not affected by line markers.
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-02-04 14:25:26 +08:00
|
|
|
// Handle virtual #include manipulation.
|
|
|
|
if (Entry->IncludeOffset) {
|
|
|
|
IncludeLoc = getLocForStartOfFile(LocInfo.first);
|
2011-09-20 04:40:19 +08:00
|
|
|
IncludeLoc = IncludeLoc.getLocWithOffset(Entry->IncludeOffset);
|
2009-02-04 14:25:26 +08:00
|
|
|
}
|
2009-02-04 09:55:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return PresumedLoc(Filename, LineNo, ColNo, IncludeLoc);
|
2009-01-26 08:43:02 +08:00
|
|
|
}
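// Usage sketch (illustrative, not part of this file): the presumed location is
// what diagnostics should print, assuming a SourceManager &SM and a valid
// SourceLocation Loc:
//
//   PresumedLoc PLoc = SM.getPresumedLoc(Loc);
//   if (PLoc.isValid())
//     llvm::errs() << PLoc.getFilename() << ':' << PLoc.getLine() << ':'
//                  << PLoc.getColumn() << '\n';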
|
|
|
|
|
2013-09-28 01:12:50 +08:00
|
|
|
/// \brief Returns whether the PresumedLoc for a given SourceLocation is
|
|
|
|
/// in the main file.
|
|
|
|
///
|
|
|
|
/// This computes the "presumed" location for a SourceLocation, then checks
|
|
|
|
/// whether it came from a file other than the main file. This is different
|
|
|
|
/// from isWrittenInMainFile() because it takes line marker directives into
|
|
|
|
/// account.
|
|
|
|
bool SourceManager::isInMainFile(SourceLocation Loc) const {
|
|
|
|
if (Loc.isInvalid()) return false;
|
|
|
|
|
|
|
|
// Presumed locations are always for expansion points.
|
|
|
|
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
|
|
|
|
|
|
|
|
bool Invalid = false;
|
|
|
|
const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
|
|
|
|
if (Invalid || !Entry.isFile())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const SrcMgr::FileInfo &FI = Entry.getFile();
|
|
|
|
|
|
|
|
// Check if there is a line directive for this location.
|
|
|
|
if (FI.hasLineDirectives())
|
|
|
|
if (const LineEntry *Entry =
|
|
|
|
LineTable->FindNearestLineEntry(LocInfo.first, LocInfo.second))
|
|
|
|
if (Entry->IncludeOffset)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return FI.getIncludeLoc().isInvalid();
|
|
|
|
}
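// Usage sketch (illustrative, not part of this file): because this honors line
// marker directives, it is the right check for "does the user see this as the
// main file", assuming a SourceManager &SM and a valid SourceLocation Loc:
//
//   if (!SM.isInMainFile(Loc))
//     return; // ignore code the user sees as coming from another file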
|
|
|
|
|
2013-11-24 09:47:49 +08:00
|
|
|
/// \brief The size of the SLocEntry that \p FID represents.
|
2011-08-24 05:02:28 +08:00
|
|
|
unsigned SourceManager::getFileIDSize(FileID FID) const {
|
|
|
|
bool Invalid = false;
|
|
|
|
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
|
|
|
|
if (Invalid)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
int ID = FID.ID;
|
|
|
|
unsigned NextOffset;
|
|
|
|
if (ID > 0 && unsigned(ID+1) == local_sloc_entry_size())
|
|
|
|
NextOffset = getNextLocalOffset();
|
|
|
|
else if (ID+1 == -1)
|
|
|
|
NextOffset = MaxLoadedOffset;
|
|
|
|
else
|
|
|
|
NextOffset = getSLocEntry(FileID::get(ID+1)).getOffset();
|
|
|
|
|
|
|
|
return NextOffset - Entry.getOffset() - 1;
|
|
|
|
}
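// Usage sketch (illustrative, not part of this file): the size can be used to
// form a location just past the last character covered by a FileID, assuming
// a SourceManager &SM and a valid FileID FID:
//
//   SourceLocation Start = SM.getLocForStartOfFile(FID);
//   SourceLocation End = Start.getLocWithOffset(SM.getFileIDSize(FID));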
|
|
|
|
|
2009-01-26 08:43:02 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Other miscellaneous methods.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2011-02-04 01:17:35 +08:00
|
|
|
/// \brief Retrieve the inode for the given file entry, if possible.
|
|
|
|
///
|
|
|
|
/// This routine involves a system call, and therefore should only be used
|
|
|
|
/// in non-performance-critical code.
|
2013-07-30 05:26:52 +08:00
|
|
|
static Optional<llvm::sys::fs::UniqueID>
|
|
|
|
getActualFileUID(const FileEntry *File) {
|
2011-02-04 01:17:35 +08:00
|
|
|
if (!File)
|
2013-02-21 09:47:18 +08:00
|
|
|
return None;
|
2013-07-30 02:43:40 +08:00
|
|
|
|
2013-07-30 05:26:52 +08:00
|
|
|
llvm::sys::fs::UniqueID ID;
|
2013-07-30 02:43:40 +08:00
|
|
|
if (llvm::sys::fs::getUniqueID(File->getName(), ID))
|
2013-02-21 09:47:18 +08:00
|
|
|
return None;
|
2013-07-30 02:43:40 +08:00
|
|
|
|
|
|
|
return ID;
|
2011-02-04 01:17:35 +08:00
|
|
|
}
|
|
|
|
|
2009-06-20 16:09:57 +08:00
|
|
|
/// \brief Get the source location for the given file:line:col triplet.
|
|
|
|
///
|
|
|
|
/// If the source file is included multiple times, the source location will
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
which has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvements in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
llvm-svn: 135484
2011-07-20 00:10:42 +08:00
|
|
|
/// be based upon an arbitrary inclusion.
|
2011-08-17 08:31:20 +08:00
|
|
|
SourceLocation SourceManager::translateFileLineCol(const FileEntry *SourceFile,
|
2011-09-20 04:40:35 +08:00
|
|
|
unsigned Line,
|
|
|
|
unsigned Col) const {
|
2009-06-20 16:09:57 +08:00
|
|
|
assert(SourceFile && "Null source file!");
|
|
|
|
assert(Line && Col && "Line and column should start from 1!");
|
|
|
|
|
2011-09-28 01:22:25 +08:00
|
|
|
FileID FirstFID = translateFile(SourceFile);
|
|
|
|
return translateLineCol(FirstFID, Line, Col);
|
|
|
|
}
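// Usage sketch (illustrative, not part of this file): mapping a user-supplied
// file:line:col triple to a SourceLocation, assuming a SourceManager &SM, a
// FileManager &FM, and a hypothetical path "foo.c":
//
//   if (const FileEntry *FE = FM.getFile("foo.c")) {
//     SourceLocation Loc = SM.translateFileLineCol(FE, /*Line=*/42, /*Col=*/7);
//     if (Loc.isValid()) {
//       // use Loc
//     }
//   }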
|
|
|
|
|
|
|
|
/// \brief Get the FileID for the given file.
|
|
|
|
///
|
|
|
|
/// If the source file is included multiple times, the FileID will be the
|
|
|
|
/// first inclusion.
|
|
|
|
FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
|
|
|
|
assert(SourceFile && "Null source file!");
|
|
|
|
|
2009-12-02 13:34:39 +08:00
|
|
|
// Find the first file ID that corresponds to the given file.
|
|
|
|
FileID FirstFID;
|
|
|
|
|
|
|
|
// First, check the main file ID, since it is common to look for a
|
|
|
|
// location in the main file.
|
2013-07-30 05:26:52 +08:00
|
|
|
Optional<llvm::sys::fs::UniqueID> SourceFileUID;
|
2013-02-21 06:23:23 +08:00
|
|
|
Optional<StringRef> SourceFileName;
|
2015-10-03 18:46:20 +08:00
|
|
|
if (MainFileID.isValid()) {
|
2011-04-20 08:21:03 +08:00
|
|
|
bool Invalid = false;
|
|
|
|
const SLocEntry &MainSLoc = getSLocEntry(MainFileID, &Invalid);
|
|
|
|
if (Invalid)
|
2011-09-28 01:22:25 +08:00
|
|
|
return FileID();
|
2011-04-20 08:21:03 +08:00
|
|
|
|
2011-02-04 01:17:35 +08:00
|
|
|
if (MainSLoc.isFile()) {
|
|
|
|
const ContentCache *MainContentCache
|
|
|
|
= MainSLoc.getFile().getContentCache();
|
2011-02-12 02:08:15 +08:00
|
|
|
if (!MainContentCache) {
|
|
|
|
// Can't do anything
|
2011-03-05 09:03:53 +08:00
|
|
|
} else if (MainContentCache->OrigEntry == SourceFile) {
|
2011-02-04 01:17:35 +08:00
|
|
|
FirstFID = MainFileID;
|
2011-02-12 02:08:15 +08:00
|
|
|
} else {
|
2011-02-04 01:17:35 +08:00
|
|
|
// Fall back: check whether we have the same base name and inode
|
|
|
|
// as the main file.
|
2011-03-05 09:03:53 +08:00
|
|
|
const FileEntry *MainFile = MainContentCache->OrigEntry;
|
2011-02-04 01:17:35 +08:00
|
|
|
SourceFileName = llvm::sys::path::filename(SourceFile->getName());
|
|
|
|
if (*SourceFileName == llvm::sys::path::filename(MainFile->getName())) {
|
2013-07-30 02:43:40 +08:00
|
|
|
SourceFileUID = getActualFileUID(SourceFile);
|
|
|
|
if (SourceFileUID) {
|
2013-07-30 05:26:52 +08:00
|
|
|
if (Optional<llvm::sys::fs::UniqueID> MainFileUID =
|
|
|
|
getActualFileUID(MainFile)) {
|
2013-07-30 02:43:40 +08:00
|
|
|
if (*SourceFileUID == *MainFileUID) {
|
2011-02-17 03:09:24 +08:00
|
|
|
FirstFID = MainFileID;
|
|
|
|
SourceFile = MainFile;
|
|
|
|
}
|
|
|
|
}
|
2011-02-04 01:17:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2009-12-02 13:34:39 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (FirstFID.isInvalid()) {
|
|
|
|
// The location we're looking for isn't in the main file; look
|
2011-07-20 00:10:42 +08:00
|
|
|
// through all of the local source locations.
|
|
|
|
for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
|
2011-04-20 08:21:03 +08:00
|
|
|
bool Invalid = false;
|
2011-07-20 00:10:42 +08:00
|
|
|
const SLocEntry &SLoc = getLocalSLocEntry(I, &Invalid);
|
2011-04-20 08:21:03 +08:00
|
|
|
if (Invalid)
|
2011-09-28 01:22:25 +08:00
|
|
|
return FileID();
|
2011-04-20 08:21:03 +08:00
|
|
|
|
2011-02-04 01:17:35 +08:00
|
|
|
if (SLoc.isFile() &&
|
|
|
|
SLoc.getFile().getContentCache() &&
|
2011-03-05 09:03:53 +08:00
|
|
|
SLoc.getFile().getContentCache()->OrigEntry == SourceFile) {
|
2009-12-02 13:34:39 +08:00
|
|
|
FirstFID = FileID::get(I);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2011-07-20 00:10:42 +08:00
|
|
|
// If that still didn't help, try the modules.
|
|
|
|
if (FirstFID.isInvalid()) {
|
|
|
|
for (unsigned I = 0, N = loaded_sloc_entry_size(); I != N; ++I) {
|
|
|
|
const SLocEntry &SLoc = getLoadedSLocEntry(I);
|
|
|
|
if (SLoc.isFile() &&
|
|
|
|
SLoc.getFile().getContentCache() &&
|
|
|
|
SLoc.getFile().getContentCache()->OrigEntry == SourceFile) {
|
|
|
|
FirstFID = FileID::get(-int(I) - 2);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2009-12-02 13:34:39 +08:00
|
|
|
}
|
2011-02-04 01:17:35 +08:00
|
|
|
|
|
|
|
// If we haven't found what we want yet, try again, but this time stat()
|
|
|
|
// each of the files in case the files have changed since we originally
|
2013-07-30 02:43:40 +08:00
|
|
|
// parsed the file.
|
2011-02-04 01:17:35 +08:00
|
|
|
if (FirstFID.isInvalid() &&
|
2013-07-30 02:43:40 +08:00
|
|
|
(SourceFileName ||
|
2011-02-04 01:17:35 +08:00
|
|
|
(SourceFileName = llvm::sys::path::filename(SourceFile->getName()))) &&
|
2013-07-30 02:43:40 +08:00
|
|
|
(SourceFileUID || (SourceFileUID = getActualFileUID(SourceFile)))) {
|
2011-04-20 08:21:03 +08:00
|
|
|
bool Invalid = false;
|
2011-07-20 00:10:42 +08:00
|
|
|
for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
|
|
|
|
FileID IFileID;
|
|
|
|
IFileID.ID = I;
|
|
|
|
const SLocEntry &SLoc = getSLocEntry(IFileID, &Invalid);
|
2011-04-20 08:21:03 +08:00
|
|
|
if (Invalid)
|
2011-09-28 01:22:25 +08:00
|
|
|
return FileID();
|
2011-04-20 08:21:03 +08:00
|
|
|
|
2011-02-04 01:17:35 +08:00
|
|
|
if (SLoc.isFile()) {
|
|
|
|
const ContentCache *FileContentCache
|
|
|
|
= SLoc.getFile().getContentCache();
|
2014-05-08 14:41:40 +08:00
|
|
|
const FileEntry *Entry = FileContentCache ? FileContentCache->OrigEntry
|
|
|
|
: nullptr;
|
2011-02-04 01:17:35 +08:00
|
|
|
if (Entry &&
|
2011-02-12 02:08:15 +08:00
|
|
|
*SourceFileName == llvm::sys::path::filename(Entry->getName())) {
|
2013-07-30 05:26:52 +08:00
|
|
|
if (Optional<llvm::sys::fs::UniqueID> EntryUID =
|
|
|
|
getActualFileUID(Entry)) {
|
2013-07-30 02:43:40 +08:00
|
|
|
if (*SourceFileUID == *EntryUID) {
|
2011-02-12 02:08:15 +08:00
|
|
|
FirstFID = FileID::get(I);
|
|
|
|
SourceFile = Entry;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2011-02-04 01:17:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-09-28 01:22:25 +08:00
|
|
|
|
2012-10-13 06:56:33 +08:00
|
|
|
(void) SourceFile;
|
2011-09-28 01:22:25 +08:00
|
|
|
return FirstFID;
|
2011-09-20 04:40:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Get the source location in \arg FID for the given line:col.
|
|
|
|
/// Returns null location if \arg FID is not a file SLocEntry.
|
|
|
|
SourceLocation SourceManager::translateLineCol(FileID FID,
|
2011-09-20 04:40:35 +08:00
|
|
|
unsigned Line,
|
|
|
|
unsigned Col) const {
|
2013-11-19 02:29:00 +08:00
|
|
|
// Lines are used as a one-based index into a zero-based array. This assert
|
|
|
|
// checks for possible buffer underruns.
|
2015-12-01 17:00:41 +08:00
|
|
|
assert(Line && Col && "Line and column should start from 1!");
|
2013-11-19 02:29:00 +08:00
|
|
|
|
2011-09-20 04:40:29 +08:00
|
|
|
if (FID.isInvalid())
|
|
|
|
return SourceLocation();
|
|
|
|
|
|
|
|
bool Invalid = false;
|
|
|
|
const SLocEntry &Entry = getSLocEntry(FID, &Invalid);
|
|
|
|
if (Invalid)
|
|
|
|
return SourceLocation();
|
2013-07-30 06:26:10 +08:00
|
|
|
|
2011-09-20 04:40:29 +08:00
|
|
|
if (!Entry.isFile())
|
2009-12-02 13:34:39 +08:00
|
|
|
return SourceLocation();
|
|
|
|
|
2011-09-21 06:14:54 +08:00
|
|
|
SourceLocation FileLoc = SourceLocation::getFileLoc(Entry.getOffset());
|
|
|
|
|
2011-02-04 01:17:35 +08:00
|
|
|
if (Line == 1 && Col == 1)
|
2011-09-21 06:14:54 +08:00
|
|
|
return FileLoc;
|
2011-02-04 01:17:35 +08:00
|
|
|
|
|
|
|
ContentCache *Content
|
2011-09-20 04:40:29 +08:00
|
|
|
= const_cast<ContentCache *>(Entry.getFile().getContentCache());
|
2011-02-04 01:17:35 +08:00
|
|
|
if (!Content)
|
|
|
|
return SourceLocation();
|
2013-07-30 06:26:10 +08:00
|
|
|
|
2011-02-04 01:17:35 +08:00
|
|
|
// If this is the first use of line information for this buffer, compute the
|
2011-07-20 00:10:42 +08:00
|
|
|
// SourceLineCache for it on demand.
|
2014-05-08 14:41:40 +08:00
|
|
|
if (!Content->SourceLineCache) {
|
2011-02-04 01:17:35 +08:00
|
|
|
bool MyInvalid = false;
|
|
|
|
ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
|
|
|
|
if (MyInvalid)
|
|
|
|
return SourceLocation();
|
|
|
|
}
|
|
|
|
|
2010-02-27 10:42:25 +08:00
|
|
|
if (Line > Content->NumLines) {
|
2010-04-21 04:35:58 +08:00
|
|
|
unsigned Size = Content->getBuffer(Diag, *this)->getBufferSize();
|
2010-02-27 10:42:25 +08:00
|
|
|
if (Size > 0)
|
|
|
|
--Size;
|
2011-09-21 06:14:54 +08:00
|
|
|
return FileLoc.getLocWithOffset(Size);
|
2010-02-27 10:42:25 +08:00
|
|
|
}
|
|
|
|
|
2014-06-28 01:40:03 +08:00
|
|
|
llvm::MemoryBuffer *Buffer = Content->getBuffer(Diag, *this);
|
2010-02-27 10:42:25 +08:00
|
|
|
unsigned FilePos = Content->SourceLineCache[Line - 1];
|
2011-12-19 16:51:05 +08:00
|
|
|
const char *Buf = Buffer->getBufferStart() + FilePos;
|
|
|
|
unsigned BufLength = Buffer->getBufferSize() - FilePos;
|
2011-09-21 06:14:54 +08:00
|
|
|
if (BufLength == 0)
|
|
|
|
return FileLoc.getLocWithOffset(FilePos);
|
|
|
|
|
2010-02-27 10:42:25 +08:00
|
|
|
unsigned i = 0;
|
|
|
|
|
|
|
|
// Check that the given column is valid.
|
|
|
|
while (i < BufLength-1 && i < Col-1 && Buf[i] != '\n' && Buf[i] != '\r')
|
|
|
|
++i;
|
2013-07-30 06:26:10 +08:00
|
|
|
return FileLoc.getLocWithOffset(FilePos + i);
|
2009-06-20 16:09:57 +08:00
|
|
|
}
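// Usage sketch (illustrative, not part of this file): jumping to a line and
// column in the main file, assuming a SourceManager &SM:
//
//   SourceLocation Loc = SM.translateLineCol(SM.getMainFileID(), 10, 1);
//   // Returns the start of line 10; if the line or column does not exist,
//   // the result is clamped to the end of the buffer or line.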
|
|
|
|
|
2011-08-22 07:33:04 +08:00
|
|
|
/// \brief Compute a map of macro argument chunks to their expanded source
|
|
|
|
/// location. Chunks that are not part of a macro argument will map to an
|
|
|
|
/// invalid source location. e.g. if a file contains one macro argument at
|
|
|
|
/// offset 100 with length 10, this is how the map will be formed:
|
|
|
|
/// 0 -> SourceLocation()
|
|
|
|
/// 100 -> Expanded macro arg location
|
|
|
|
/// 110 -> SourceLocation()
|
2011-09-26 16:01:50 +08:00
|
|
|
void SourceManager::computeMacroArgsCache(MacroArgsMap *&CachePtr,
|
2011-09-20 04:40:35 +08:00
|
|
|
FileID FID) const {
|
2015-10-03 18:46:20 +08:00
|
|
|
assert(FID.isValid());
|
2011-09-26 16:01:50 +08:00
|
|
|
assert(!CachePtr);
|
2011-08-22 07:33:04 +08:00
|
|
|
|
2011-09-26 16:01:50 +08:00
|
|
|
CachePtr = new MacroArgsMap();
|
|
|
|
MacroArgsMap &MacroArgsCache = *CachePtr;
|
2011-08-22 07:33:04 +08:00
|
|
|
// Initially no macro argument chunk is present.
|
|
|
|
MacroArgsCache.insert(std::make_pair(0, SourceLocation()));
|
2011-08-17 08:31:20 +08:00
|
|
|
|
|
|
|
int ID = FID.ID;
|
|
|
|
while (1) {
|
|
|
|
++ID;
|
|
|
|
// Stop if there are no more FileIDs to check.
|
|
|
|
if (ID > 0) {
|
|
|
|
if (unsigned(ID) >= local_sloc_entry_size())
|
2011-08-22 07:33:04 +08:00
|
|
|
return;
|
2011-08-17 08:31:20 +08:00
|
|
|
} else if (ID == -1) {
|
2011-08-22 07:33:04 +08:00
|
|
|
return;
|
2011-08-17 08:31:20 +08:00
|
|
|
}
|
|
|
|
|
2013-06-08 01:57:59 +08:00
|
|
|
bool Invalid = false;
|
|
|
|
const SrcMgr::SLocEntry &Entry = getSLocEntryByID(ID, &Invalid);
|
|
|
|
if (Invalid)
|
|
|
|
return;
|
2011-08-17 08:31:20 +08:00
|
|
|
if (Entry.isFile()) {
|
2011-08-22 07:33:04 +08:00
|
|
|
SourceLocation IncludeLoc = Entry.getFile().getIncludeLoc();
|
|
|
|
if (IncludeLoc.isInvalid())
|
|
|
|
continue;
|
|
|
|
if (!isInFileID(IncludeLoc, FID))
|
|
|
|
return; // No more files/macros that may be "contained" in this file.
|
|
|
|
|
|
|
|
// Skip the files/macros of the #include'd file; we only care about macros
|
|
|
|
// that lexed macro arguments from our file.
|
|
|
|
if (Entry.getFile().NumCreatedFIDs)
|
|
|
|
ID += Entry.getFile().NumCreatedFIDs - 1/*because of next ++ID*/;
|
2011-08-17 08:31:20 +08:00
|
|
|
continue;
|
|
|
|
}
|
2011-08-22 07:33:04 +08:00
|
|
|
|
2011-12-22 00:56:35 +08:00
|
|
|
const ExpansionInfo &ExpInfo = Entry.getExpansion();
|
|
|
|
|
|
|
|
if (ExpInfo.getExpansionLocStart().isFileID()) {
|
|
|
|
if (!isInFileID(ExpInfo.getExpansionLocStart(), FID))
|
|
|
|
return; // No more files/macros that may be "contained" in this file.
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!ExpInfo.isMacroArgExpansion())
|
2011-08-17 08:31:20 +08:00
|
|
|
continue;
|
2011-12-22 00:56:35 +08:00
|
|
|
|
2012-10-20 08:51:32 +08:00
|
|
|
associateFileChunkWithMacroArgExp(MacroArgsCache, FID,
|
|
|
|
ExpInfo.getSpellingLoc(),
|
|
|
|
SourceLocation::getMacroLoc(Entry.getOffset()),
|
|
|
|
getFileIDSize(FileID::get(ID)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void SourceManager::associateFileChunkWithMacroArgExp(
|
|
|
|
MacroArgsMap &MacroArgsCache,
|
|
|
|
FileID FID,
|
|
|
|
SourceLocation SpellLoc,
|
|
|
|
SourceLocation ExpansionLoc,
|
|
|
|
unsigned ExpansionLength) const {
|
|
|
|
if (!SpellLoc.isFileID()) {
|
|
|
|
unsigned SpellBeginOffs = SpellLoc.getOffset();
|
|
|
|
unsigned SpellEndOffs = SpellBeginOffs + ExpansionLength;
|
|
|
|
|
|
|
|
// The spelling range for this macro argument expansion can span multiple
|
|
|
|
// consecutive FileID entries. Go through each entry contained in the
|
|
|
|
// spelling range and if one is itself a macro argument expansion, recurse
|
|
|
|
// and associate the file chunk that it represents.
|
|
|
|
|
|
|
|
FileID SpellFID; // Current FileID in the spelling range.
|
|
|
|
unsigned SpellRelativeOffs;
|
2014-03-02 21:01:17 +08:00
|
|
|
std::tie(SpellFID, SpellRelativeOffs) = getDecomposedLoc(SpellLoc);
|
2012-10-20 08:51:32 +08:00
|
|
|
while (1) {
|
|
|
|
const SLocEntry &Entry = getSLocEntry(SpellFID);
|
|
|
|
unsigned SpellFIDBeginOffs = Entry.getOffset();
|
|
|
|
unsigned SpellFIDSize = getFileIDSize(SpellFID);
|
|
|
|
unsigned SpellFIDEndOffs = SpellFIDBeginOffs + SpellFIDSize;
|
|
|
|
const ExpansionInfo &Info = Entry.getExpansion();
|
|
|
|
if (Info.isMacroArgExpansion()) {
|
|
|
|
unsigned CurrSpellLength;
|
|
|
|
if (SpellFIDEndOffs < SpellEndOffs)
|
|
|
|
CurrSpellLength = SpellFIDSize - SpellRelativeOffs;
|
|
|
|
else
|
|
|
|
CurrSpellLength = ExpansionLength;
|
|
|
|
associateFileChunkWithMacroArgExp(MacroArgsCache, FID,
|
|
|
|
Info.getSpellingLoc().getLocWithOffset(SpellRelativeOffs),
|
|
|
|
ExpansionLoc, CurrSpellLength);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (SpellFIDEndOffs >= SpellEndOffs)
|
|
|
|
return; // we covered all FileID entries in the spelling range.
|
|
|
|
|
|
|
|
// Move to the next FileID entry in the spelling range.
|
|
|
|
unsigned advance = SpellFIDSize - SpellRelativeOffs + 1;
|
|
|
|
ExpansionLoc = ExpansionLoc.getLocWithOffset(advance);
|
|
|
|
ExpansionLength -= advance;
|
|
|
|
++SpellFID.ID;
|
|
|
|
SpellRelativeOffs = 0;
|
2011-12-22 00:56:35 +08:00
|
|
|
}
|
|
|
|
|
2011-08-17 08:31:20 +08:00
|
|
|
}
|
2012-10-20 08:51:32 +08:00
|
|
|
|
|
|
|
assert(SpellLoc.isFileID());
|
|
|
|
|
|
|
|
unsigned BeginOffs;
|
|
|
|
if (!isInFileID(SpellLoc, FID, &BeginOffs))
|
|
|
|
return;
|
|
|
|
|
|
|
|
unsigned EndOffs = BeginOffs + ExpansionLength;
|
|
|
|
|
|
|
|
// Add a new chunk for this macro argument. A previous macro argument chunk
|
|
|
|
// may have been lexed again, so e.g. if the map is
|
|
|
|
// 0 -> SourceLocation()
|
|
|
|
// 100 -> Expanded loc #1
|
|
|
|
// 110 -> SourceLocation()
|
|
|
|
// and we found a new macro FileID that lexed from offset 105 with length 3,
|
|
|
|
// the new map will be:
|
|
|
|
// 0 -> SourceLocation()
|
|
|
|
// 100 -> Expanded loc #1
|
|
|
|
// 105 -> Expanded loc #2
|
|
|
|
// 108 -> Expanded loc #1
|
|
|
|
// 110 -> SourceLocation()
|
|
|
|
//
|
|
|
|
// Since re-lexed macro chunks will always be the same size as or smaller than
|
|
|
|
// previous chunks, we only need to find where the ending of the new macro
|
|
|
|
// chunk is mapped to and update the map with new begin/end mappings.
|
|
|
|
|
|
|
|
MacroArgsMap::iterator I = MacroArgsCache.upper_bound(EndOffs);
|
|
|
|
--I;
|
|
|
|
SourceLocation EndOffsMappedLoc = I->second;
|
|
|
|
MacroArgsCache[BeginOffs] = ExpansionLoc;
|
|
|
|
MacroArgsCache[EndOffs] = EndOffsMappedLoc;
|
2011-08-17 08:31:20 +08:00
|
|
|
}
|
|
|
|
|
2011-08-22 07:33:04 +08:00
|
|
|
/// \brief If \arg Loc points inside a function macro argument, the returned
|
|
|
|
/// location will be the macro location in which the argument was expanded.
|
|
|
|
/// If a macro argument is used multiple times, the expanded location will
|
|
|
|
/// be at the first expansion of the argument.
|
|
|
|
/// e.g.
|
|
|
|
/// MY_MACRO(foo);
|
|
|
|
/// ^
|
|
|
|
/// Passing a file location pointing at 'foo' will yield a macro location
|
|
|
|
/// where 'foo' was expanded into.
|
2011-09-20 04:40:35 +08:00
|
|
|
SourceLocation
|
|
|
|
SourceManager::getMacroArgExpandedLocation(SourceLocation Loc) const {
|
2011-08-22 07:33:04 +08:00
|
|
|
if (Loc.isInvalid() || !Loc.isFileID())
|
|
|
|
return Loc;
|
|
|
|
|
|
|
|
FileID FID;
|
|
|
|
unsigned Offset;
|
2014-03-02 21:01:17 +08:00
|
|
|
std::tie(FID, Offset) = getDecomposedLoc(Loc);
|
2011-08-22 07:33:04 +08:00
|
|
|
if (FID.isInvalid())
|
|
|
|
return Loc;
|
|
|
|
|
2011-09-26 16:01:50 +08:00
|
|
|
MacroArgsMap *&MacroArgsCache = MacroArgsCacheMap[FID];
|
|
|
|
if (!MacroArgsCache)
|
|
|
|
computeMacroArgsCache(MacroArgsCache, FID);
|
|
|
|
|
|
|
|
assert(!MacroArgsCache->empty());
|
|
|
|
MacroArgsMap::iterator I = MacroArgsCache->upper_bound(Offset);
|
2011-08-22 07:33:04 +08:00
|
|
|
--I;
|
|
|
|
|
|
|
|
unsigned MacroArgBeginOffs = I->first;
|
|
|
|
SourceLocation MacroArgExpandedLoc = I->second;
|
|
|
|
if (MacroArgExpandedLoc.isValid())
|
2011-09-20 04:40:19 +08:00
|
|
|
return MacroArgExpandedLoc.getLocWithOffset(Offset - MacroArgBeginOffs);
|
2011-08-22 07:33:04 +08:00
|
|
|
|
|
|
|
return Loc;
|
|
|
|
}
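// Usage sketch (illustrative, not part of this file): given a file location
// that points at a token used as a macro argument, the expanded location can
// be recovered, assuming a SourceManager &SM and such a SourceLocation Loc:
//
//   SourceLocation ExpLoc = SM.getMacroArgExpandedLocation(Loc);
//   if (ExpLoc != Loc) {
//     // Loc was spelled as a macro argument; ExpLoc points into the expansion.
//   }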
|
|
|
|
|
2013-04-13 09:03:57 +08:00
|
|
|
std::pair<FileID, unsigned>
|
|
|
|
SourceManager::getDecomposedIncludedLoc(FileID FID) const {
|
2013-05-25 06:24:04 +08:00
|
|
|
if (FID.isInvalid())
|
|
|
|
return std::make_pair(FileID(), 0);
|
|
|
|
|
2013-04-13 09:03:57 +08:00
|
|
|
// Uses IncludedLocMap to retrieve/cache the decomposed loc.
|
|
|
|
|
|
|
|
typedef std::pair<FileID, unsigned> DecompTy;
|
|
|
|
typedef llvm::DenseMap<FileID, DecompTy> MapTy;
|
|
|
|
std::pair<MapTy::iterator, bool>
|
|
|
|
InsertOp = IncludedLocMap.insert(std::make_pair(FID, DecompTy()));
|
|
|
|
DecompTy &DecompLoc = InsertOp.first->second;
|
|
|
|
if (!InsertOp.second)
|
|
|
|
return DecompLoc; // already in map.
|
|
|
|
|
|
|
|
SourceLocation UpperLoc;
|
2013-05-25 06:24:04 +08:00
|
|
|
bool Invalid = false;
|
|
|
|
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
|
|
|
|
if (!Invalid) {
|
|
|
|
if (Entry.isExpansion())
|
|
|
|
UpperLoc = Entry.getExpansion().getExpansionLocStart();
|
|
|
|
else
|
|
|
|
UpperLoc = Entry.getFile().getIncludeLoc();
|
|
|
|
}
|
2013-04-13 09:03:57 +08:00
|
|
|
|
|
|
|
if (UpperLoc.isValid())
|
|
|
|
DecompLoc = getDecomposedLoc(UpperLoc);
|
|
|
|
|
|
|
|
return DecompLoc;
|
|
|
|
}
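// Usage sketch (illustrative, not part of this file): finding where a FileID
// was entered from, assuming a SourceManager &SM and a valid FileID FID:
//
//   std::pair<FileID, unsigned> Inc = SM.getDecomposedIncludedLoc(FID);
//   if (Inc.first.isValid()) {
//     // FID was #included (or expanded) at offset Inc.second of file Inc.first.
//   }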
|
|
|
|
|
2011-07-26 13:17:23 +08:00
|
|
|
/// Given a decomposed source location, move it up the include/expansion stack
|
|
|
|
/// to the parent source location. If this is possible, return the decomposed
|
|
|
|
/// version of the parent in Loc and return false. If Loc is the top-level
|
|
|
|
/// entry, return true and don't modify it.
|
2010-05-08 04:35:24 +08:00
|
|
|
static bool MoveUpIncludeHierarchy(std::pair<FileID, unsigned> &Loc,
|
|
|
|
const SourceManager &SM) {
|
2013-04-13 09:03:57 +08:00
|
|
|
std::pair<FileID, unsigned> UpperLoc = SM.getDecomposedIncludedLoc(Loc.first);
|
|
|
|
if (UpperLoc.first.isInvalid())
|
2010-05-08 04:35:24 +08:00
|
|
|
return true; // We reached the top.
|
2013-04-13 09:03:57 +08:00
|
|
|
|
|
|
|
Loc = UpperLoc;
|
2010-05-08 04:35:24 +08:00
|
|
|
return false;
|
|
|
|
}
|
2013-02-27 08:00:26 +08:00
|
|
|
|
|
|
|
/// Return the cache entry for comparing the given file IDs
|
|
|
|
/// for isBeforeInTranslationUnit.
|
|
|
|
InBeforeInTUCacheEntry &SourceManager::getInBeforeInTUCache(FileID LFID,
|
|
|
|
FileID RFID) const {
|
|
|
|
// This is a magic number for limiting the cache size. It was experimentally
|
|
|
|
// derived from a small Objective-C project (where the cache filled
|
|
|
|
// out to ~250 items). We can make it larger if necessary.
|
|
|
|
enum { MagicCacheSize = 300 };
|
|
|
|
IsBeforeInTUCacheKey Key(LFID, RFID);
|
|
|
|
|
|
|
|
// If the cache size isn't too large, do a lookup and if necessary default
|
|
|
|
// construct an entry. We can then return it to the caller for direct
|
|
|
|
// use. When they update the value, the cache will get automatically
|
|
|
|
// updated as well.
|
|
|
|
if (IBTUCache.size() < MagicCacheSize)
|
|
|
|
return IBTUCache[Key];
|
|
|
|
|
|
|
|
// Otherwise, do a lookup that will not construct a new value.
|
|
|
|
InBeforeInTUCache::iterator I = IBTUCache.find(Key);
|
|
|
|
if (I != IBTUCache.end())
|
|
|
|
return I->second;
|
|
|
|
|
|
|
|
// Fall back to the overflow value.
|
|
|
|
return IBTUCacheOverflow;
|
|
|
|
}
|
2010-05-08 04:35:24 +08:00
|
|
|
|
2009-06-24 06:01:48 +08:00
|
|
|
/// \brief Determines the order of 2 source locations in the translation unit.
|
|
|
|
///
|
|
|
|
/// \returns true if LHS source location comes before RHS, false otherwise.
|
|
|
|
bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
|
|
|
|
SourceLocation RHS) const {
|
|
|
|
assert(LHS.isValid() && RHS.isValid() && "Passed invalid source location!");
|
|
|
|
if (LHS == RHS)
|
|
|
|
return false;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-06-24 06:01:48 +08:00
|
|
|
std::pair<FileID, unsigned> LOffs = getDecomposedLoc(LHS);
|
|
|
|
std::pair<FileID, unsigned> ROffs = getDecomposedLoc(RHS);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2013-05-25 07:47:43 +08:00
|
|
|
// getDecomposedLoc may have failed to return a valid FileID because, e.g. it
|
|
|
|
// is a serialized one referring to a file that was removed after we loaded
|
|
|
|
// the PCH.
|
2013-05-25 06:24:04 +08:00
|
|
|
if (LOffs.first.isInvalid() || ROffs.first.isInvalid())
|
2013-05-25 09:03:03 +08:00
|
|
|
return LOffs.first.isInvalid() && !ROffs.first.isInvalid();
|
2013-05-25 06:24:04 +08:00
|
|
|
|
2009-06-24 06:01:48 +08:00
|
|
|
// If the source locations are in the same file, just compare offsets.
|
|
|
|
if (LOffs.first == ROffs.first)
|
|
|
|
return LOffs.second < ROffs.second;
|
|
|
|
|
2013-02-27 08:00:26 +08:00
|
|
|
// If we are comparing a source location with multiple locations in the same
|
|
|
|
// file, we get a big win by caching the result.
|
|
|
|
InBeforeInTUCacheEntry &IsBeforeInTUCache =
|
|
|
|
getInBeforeInTUCache(LOffs.first, ROffs.first);
|
|
|
|
|
|
2010-05-07 13:10:46 +08:00
|
|
|
if (IsBeforeInTUCache.isCacheValid(LOffs.first, ROffs.first))
|
|
|
|
return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-05-07 09:17:07 +08:00
|
|
|
// Okay, we missed in the cache, start updating the cache for this query.
|
2011-08-17 08:31:18 +08:00
|
|
|
IsBeforeInTUCache.setQueryFIDs(LOffs.first, ROffs.first,
|
|
|
|
/*isLFIDBeforeRFID=*/LOffs.first.ID < ROffs.first.ID);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-07-20 00:10:42 +08:00
|
|
|
// We need to find the common ancestor. The only way of doing this is to
|
|
|
|
// build the complete include chain for one and then walk up the chain
|
|
|
|
// of the other looking for a match.
|
|
|
|
// We use a map from FileID to Offset to store the chain. Easier than writing
|
|
|
|
// a custom set hash info that only depends on the first part of a pair.
|
2013-04-13 09:03:57 +08:00
|
|
|
typedef llvm::SmallDenseMap<FileID, unsigned, 16> LocSet;
|
2011-07-20 00:10:42 +08:00
|
|
|
LocSet LChain;
|
2010-05-07 13:51:13 +08:00
|
|
|
do {
|
2011-07-20 00:10:42 +08:00
|
|
|
LChain.insert(LOffs);
|
|
|
|
// We catch the case where LOffs is in a file included by ROffs and
|
|
|
|
// quit early. The other way round unfortunately remains suboptimal.
|
|
|
|
} while (LOffs.first != ROffs.first && !MoveUpIncludeHierarchy(LOffs, *this));
|
|
|
|
LocSet::iterator I;
|
|
|
|
while((I = LChain.find(ROffs.first)) == LChain.end()) {
|
|
|
|
if (MoveUpIncludeHierarchy(ROffs, *this))
|
|
|
|
break; // Met at topmost file.
|
|
|
|
}
|
|
|
|
if (I != LChain.end())
|
|
|
|
LOffs = *I;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-05-07 13:51:13 +08:00
|
|
|
// If we exited because we found a nearest common ancestor, compare the
|
|
|
|
// locations within the common file and cache them.
|
|
|
|
if (LOffs.first == ROffs.first) {
|
|
|
|
IsBeforeInTUCache.setCommonLoc(LOffs.first, LOffs.second, ROffs.second);
|
|
|
|
return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
|
2009-06-24 06:01:48 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2015-03-17 01:54:54 +08:00
|
|
|
// If we arrived here, the location is either in a built-ins buffer or
|
|
|
|
// associated with global inline asm. PR5662 and PR22576 are examples.
|
|
|
|
|
2011-07-20 00:10:42 +08:00
|
|
|
// Clear the lookup cache; it depends on a common location.
|
2011-08-17 08:31:18 +08:00
|
|
|
IsBeforeInTUCache.clear();
|
2015-03-17 01:54:54 +08:00
|
|
|
llvm::MemoryBuffer *LBuf = getBuffer(LOffs.first);
|
|
|
|
llvm::MemoryBuffer *RBuf = getBuffer(ROffs.first);
|
|
|
|
bool LIsBuiltins = strcmp("<built-in>", LBuf->getBufferIdentifier()) == 0;
|
|
|
|
bool RIsBuiltins = strcmp("<built-in>", RBuf->getBufferIdentifier()) == 0;
|
|
|
|
// Sort built-in before non-built-in.
|
|
|
|
if (LIsBuiltins || RIsBuiltins) {
|
|
|
|
if (LIsBuiltins != RIsBuiltins)
|
|
|
|
return LIsBuiltins;
|
|
|
|
// Both are in built-in buffers, but from different files. We just claim that
|
|
|
|
// lower IDs come first.
|
|
|
|
return LOffs.first < ROffs.first;
|
|
|
|
}
|
|
|
|
bool LIsAsm = strcmp("<inline asm>", LBuf->getBufferIdentifier()) == 0;
|
|
|
|
bool RIsAsm = strcmp("<inline asm>", RBuf->getBufferIdentifier()) == 0;
|
|
|
|
// Sort assembler after built-ins, but before the rest.
|
|
|
|
if (LIsAsm || RIsAsm) {
|
|
|
|
if (LIsAsm != RIsAsm)
|
|
|
|
return RIsAsm;
|
|
|
|
assert(LOffs.first == ROffs.first);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
llvm_unreachable("Unsortable locations found");
|
2009-06-24 06:01:48 +08:00
|
|
|
}
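// Usage sketch (illustrative, not part of this file): this predicate is a
// strict weak ordering, so it can drive std::sort over locations from the same
// translation unit, assuming a SourceManager &SM and a populated vector:
//
//   std::vector<SourceLocation> Locs = /* ... */;
//   std::sort(Locs.begin(), Locs.end(),
//             [&SM](SourceLocation LHS, SourceLocation RHS) {
//               return SM.isBeforeInTranslationUnit(LHS, RHS);
//             });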
|
2009-01-26 08:43:02 +08:00
|
|
|
|
2006-06-18 13:43:12 +08:00
|
|
|
void SourceManager::PrintStats() const {
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << "\n*** Source Manager Stats:\n";
|
|
|
|
llvm::errs() << FileInfos.size() << " files mapped, " << MemBufferInfos.size()
|
|
|
|
<< " mem buffers mapped.\n";
|
2011-07-20 00:10:42 +08:00
|
|
|
llvm::errs() << LocalSLocEntryTable.size() << " local SLocEntry's allocated ("
|
2011-07-28 02:41:16 +08:00
|
|
|
<< llvm::capacity_in_bytes(LocalSLocEntryTable)
|
2011-07-07 11:40:24 +08:00
|
|
|
<< " bytes of capacity), "
|
2011-07-20 00:10:42 +08:00
|
|
|
<< NextLocalOffset << "B of Sloc address space used.\n";
|
|
|
|
llvm::errs() << LoadedSLocEntryTable.size()
|
|
|
|
<< " loaded SLocEntries allocated, "
|
2011-08-17 08:31:20 +08:00
|
|
|
<< MaxLoadedOffset - CurrentLoadedOffset
|
2011-07-20 00:10:42 +08:00
|
|
|
<< "B of Sloc address space used.\n";
|
|
|
|
|
2006-06-18 13:43:12 +08:00
|
|
|
unsigned NumLineNumsComputed = 0;
|
|
|
|
unsigned NumFileBytesMapped = 0;
|
2009-02-03 15:30:45 +08:00
|
|
|
for (fileinfo_iterator I = fileinfo_begin(), E = fileinfo_end(); I != E; ++I){
|
2014-05-08 14:41:40 +08:00
|
|
|
NumLineNumsComputed += I->second->SourceLineCache != nullptr;
|
2009-02-03 15:30:45 +08:00
|
|
|
NumFileBytesMapped += I->second->getSizeBytesMapped();
|
2006-06-18 13:43:12 +08:00
|
|
|
}
|
2011-09-26 16:01:50 +08:00
|
|
|
unsigned NumMacroArgsComputed = MacroArgsCacheMap.size();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << NumFileBytesMapped << " bytes of files mapped, "
|
2011-08-22 07:33:04 +08:00
|
|
|
<< NumLineNumsComputed << " files with line #'s computed, "
|
|
|
|
<< NumMacroArgsComputed << " files with macro args computed.\n";
|
2009-08-23 20:08:50 +08:00
|
|
|
llvm::errs() << "FileID scans: " << NumLinearScans << " linear, "
|
|
|
|
<< NumBinaryProbes << " binary.\n";
|
2006-06-18 13:43:12 +08:00
|
|
|
}
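// Editor's illustrative sketch: PrintStats is a debugging aid, so a caller
// would typically guard it behind a "show stats" option at the end of a
// compilation. The wrapper below is hypothetical, not a clang API.
static void maybePrintSourceManagerStats(const SourceManager &SM,
                                         bool ShowStats) {
  if (ShowStats)
    SM.PrintStats(); // Writes the statistics above to llvm::errs().
}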
|
2009-04-27 14:38:32 +08:00
|
|
|
|
2015-08-13 08:45:11 +08:00
|
|
|
LLVM_DUMP_METHOD void SourceManager::dump() const {
|
|
|
|
llvm::raw_ostream &out = llvm::errs();
|
|
|
|
|
|
|
|
auto DumpSLocEntry = [&](int ID, const SrcMgr::SLocEntry &Entry,
|
|
|
|
llvm::Optional<unsigned> NextStart) {
|
|
|
|
out << "SLocEntry <FileID " << ID << "> " << (Entry.isFile() ? "file" : "expansion")
|
|
|
|
<< " <SourceLocation " << Entry.getOffset() << ":";
|
|
|
|
if (NextStart)
|
|
|
|
out << *NextStart << ">\n";
|
|
|
|
else
|
|
|
|
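// The "\?" escape breaks up "??>" so it is not interpreted as a trigraph.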
out << "???\?>\n";
|
|
|
|
if (Entry.isFile()) {
|
|
|
|
auto &FI = Entry.getFile();
|
|
|
|
if (FI.NumCreatedFIDs)
|
|
|
|
out << " covers <FileID " << ID << ":" << int(ID + FI.NumCreatedFIDs)
|
|
|
|
<< ">\n";
|
|
|
|
if (FI.getIncludeLoc().isValid())
|
|
|
|
out << " included from " << FI.getIncludeLoc().getOffset() << "\n";
|
|
|
|
if (auto *CC = FI.getContentCache()) {
|
|
|
|
out << " for " << (CC->OrigEntry ? CC->OrigEntry->getName() : "<none>")
|
|
|
|
<< "\n";
|
|
|
|
if (CC->BufferOverridden)
|
|
|
|
out << " contents overridden\n";
|
|
|
|
if (CC->ContentsEntry != CC->OrigEntry) {
|
|
|
|
out << " contents from "
|
|
|
|
<< (CC->ContentsEntry ? CC->ContentsEntry->getName() : "<none>")
|
|
|
|
<< "\n";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
auto &EI = Entry.getExpansion();
|
|
|
|
out << " spelling from " << EI.getSpellingLoc().getOffset() << "\n";
|
|
|
|
out << " macro " << (EI.isMacroArgExpansion() ? "arg" : "body")
|
|
|
|
<< " range <" << EI.getExpansionLocStart().getOffset() << ":"
|
|
|
|
<< EI.getExpansionLocEnd().getOffset() << ">\n";
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Dump local SLocEntries.
|
|
|
|
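// The end offset of local entry ID is the start of entry ID+1; the last
// local entry extends up to NextLocalOffset.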
for (unsigned ID = 0, NumIDs = LocalSLocEntryTable.size(); ID != NumIDs; ++ID) {
|
|
|
|
DumpSLocEntry(ID, LocalSLocEntryTable[ID],
|
|
|
|
ID == NumIDs - 1 ? NextLocalOffset
|
|
|
|
: LocalSLocEntryTable[ID + 1].getOffset());
|
|
|
|
}
|
|
|
|
// Dump loaded SLocEntries.
|
|
|
|
llvm::Optional<unsigned> NextStart;
|
|
|
|
for (unsigned Index = 0; Index != LoadedSLocEntryTable.size(); ++Index) {
|
|
|
|
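// Loaded (AST/PCH) entries are addressed with negative FileIDs, starting
// at -2 for index 0.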
int ID = -(int)Index - 2;
|
|
|
|
if (SLocEntryLoaded[Index]) {
|
|
|
|
DumpSLocEntry(ID, LoadedSLocEntryTable[Index], NextStart);
|
|
|
|
NextStart = LoadedSLocEntryTable[Index].getOffset();
|
|
|
|
} else {
|
|
|
|
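// Not materialized: this entry's offset is unknown, so the next dumped
// entry's end cannot be bounded.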
NextStart = None;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
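// Editor's illustrative sketch (not part of SourceManager): the FileID
// numbering dump() relies on. Local (parsed) entries are labeled with their
// index into LocalSLocEntryTable, while loaded (AST/PCH) entries get negative
// IDs via "-(int)Index - 2", as computed above. The helper names are
// hypothetical.
static int loadedIndexToID(unsigned Index) { return -(int)Index - 2; }
static unsigned loadedIDToIndex(int ID) { return unsigned(-ID - 2); }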
|
|
|
|
|
2015-10-20 21:23:58 +08:00
|
|
|
ExternalSLocEntrySource::~ExternalSLocEntrySource() { }
|
2011-04-29 04:36:42 +08:00
|
|
|
|
|
|
|
/// Return the amount of memory used by memory buffers, broken down
|
|
|
|
/// by heap-backed versus mmap'ed memory.
|
|
|
|
SourceManager::MemoryBufferSizes SourceManager::getMemoryBufferSizes() const {
|
|
|
|
size_t malloc_bytes = 0;
|
|
|
|
size_t mmap_bytes = 0;
|
|
|
|
|
|
|
|
for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i)
|
|
|
|
if (size_t sized_mapped = MemBufferInfos[i]->getSizeBytesMapped())
|
|
|
|
switch (MemBufferInfos[i]->getMemoryBufferKind()) {
|
|
|
|
case llvm::MemoryBuffer::MemoryBuffer_MMap:
|
|
|
|
mmap_bytes += sized_mapped;
|
|
|
|
break;
|
|
|
|
case llvm::MemoryBuffer::MemoryBuffer_Malloc:
|
|
|
|
malloc_bytes += sized_mapped;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return MemoryBufferSizes(malloc_bytes, mmap_bytes);
|
|
|
|
}
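// Editor's illustrative sketch: reporting the split computed above. This
// assumes MemoryBufferSizes exposes malloc_bytes and mmap_bytes members, as
// the constructor call above suggests; the helper itself is hypothetical.
static void reportBufferMemory(const SourceManager &SM) {
  SourceManager::MemoryBufferSizes Sizes = SM.getMemoryBufferSizes();
  llvm::errs() << Sizes.malloc_bytes << " bytes of heap-allocated buffers, "
               << Sizes.mmap_bytes << " bytes of mmap'ed buffers.\n";
}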
|
|
|
|
|
2011-07-27 07:46:06 +08:00
|
|
|
size_t SourceManager::getDataStructureSizes() const {
|
2012-05-04 05:50:39 +08:00
|
|
|
size_t size = llvm::capacity_in_bytes(MemBufferInfos)
|
2011-07-28 02:41:16 +08:00
|
|
|
+ llvm::capacity_in_bytes(LocalSLocEntryTable)
|
|
|
|
+ llvm::capacity_in_bytes(LoadedSLocEntryTable)
|
|
|
|
+ llvm::capacity_in_bytes(SLocEntryLoaded)
|
2012-05-04 05:50:39 +08:00
|
|
|
+ llvm::capacity_in_bytes(FileInfos);
|
|
|
|
|
|
|
|
if (OverriddenFilesInfo)
|
|
|
|
size += llvm::capacity_in_bytes(OverriddenFilesInfo->OverriddenFiles);
|
|
|
|
|
|
|
|
return size;
|
2011-07-27 07:46:06 +08:00
|
|
|
}
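// Editor's illustrative sketch: a rough total for SourceManager memory,
// combining buffer contents with the bookkeeping measured above. Assumes the
// same malloc_bytes/mmap_bytes members as the sketch after
// getMemoryBufferSizes(); the helper is hypothetical.
static size_t totalSourceManagerBytes(const SourceManager &SM) {
  SourceManager::MemoryBufferSizes Buffers = SM.getMemoryBufferSizes();
  return Buffers.malloc_bytes + Buffers.mmap_bytes +
         SM.getDataStructureSizes();
}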
|