//===--- CacheTokens.cpp - Caching of lexer tokens for PCH support --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a possible implementation of PCH support for Clang that is
// based on caching lexed tokens and identifiers.
//
//===----------------------------------------------------------------------===//

#include "clang.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Path.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Streams.h"

using namespace clang;

typedef uint32_t Offset;

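// The Emit* helpers below write integers to the output one byte at a time,
// least-significant byte first (little-endian), independent of the host byte
// order.  For example, Emit32(Out, 0x11223344) writes the byte sequence
// 0x44 0x33 0x22 0x11.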
static void Emit8(llvm::raw_ostream& Out, uint32_t V) {
  Out << (unsigned char)(V);
}

static void Emit16(llvm::raw_ostream& Out, uint32_t V) {
  Out << (unsigned char)(V);
  Out << (unsigned char)(V >> 8);
  assert((V >> 16) == 0);
}

static void Emit32(llvm::raw_ostream& Out, uint32_t V) {
  Out << (unsigned char)(V);
  Out << (unsigned char)(V >> 8);
  Out << (unsigned char)(V >> 16);
  Out << (unsigned char)(V >> 24);
}

static void Emit64(llvm::raw_ostream& Out, uint64_t V) {
  Out << (unsigned char)(V);
  Out << (unsigned char)(V >> 8);
  Out << (unsigned char)(V >> 16);
  Out << (unsigned char)(V >> 24);
  Out << (unsigned char)(V >> 32);
  Out << (unsigned char)(V >> 40);
  Out << (unsigned char)(V >> 48);
  Out << (unsigned char)(V >> 56);
}

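// Pad - Emit zero bytes until the current output offset is a multiple of A.
// 'A' is assumed to be a power of two.  For example, padding to A=4 from
// offset 6 emits two zero bytes so that the next write lands at offset 8.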
static void Pad(llvm::raw_fd_ostream& Out, unsigned A) {
  Offset off = (Offset) Out.tell();
  uint32_t n = ((uintptr_t)(off+A-1) & ~(uintptr_t)(A-1)) - off;
  for ( ; n ; --n ) Emit8(Out, 0);
}

// Bernstein hash function:
// This is basically copy-and-paste from StringMap.  This likely won't stay
// here, which is why I didn't bother to expose this function from StringMap.
static unsigned BernsteinHash(const char* x) {
  unsigned int R = 0;
  for ( ; *x != '\0' ; ++x) R = R * 33 + *x;
  return R + (R >> 5);
}

//===----------------------------------------------------------------------===//
// On Disk Hashtable Logic.  This will eventually get refactored and put
// elsewhere.
//===----------------------------------------------------------------------===//

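// OnDiskChainedHashTableGenerator is parameterized by an 'Info' trait class
// that describes how keys and data are hashed and serialized.  Roughly, a
// trait provides (see FileEntryPCHEntryInfo and PCHIdentifierTableTrait
// below for the concrete traits used in this file):
//
//   key_type, key_type_ref      - the key and how it is passed around
//   data_type, data_type_ref    - the associated data and how it is passed
//   static unsigned ComputeHash(key_type_ref);
//   static std::pair<unsigned,unsigned>
//          EmitKeyDataLength(out, key, data);  // returns (key len, data len)
//   static void EmitKey(out, key, key_len);
//   static void EmitData(out, key, data, data_len);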
template<typename Info>
class OnDiskChainedHashTableGenerator {
  unsigned NumBuckets;
  unsigned NumEntries;
  llvm::BumpPtrAllocator BA;

  class Item {
  public:
    typename Info::key_type key;
    typename Info::data_type data;
    Item *next;
    const uint32_t hash;

    Item(typename Info::key_type_ref k, typename Info::data_type_ref d)
      : key(k), data(d), next(0), hash(Info::ComputeHash(k)) {}
  };

  class Bucket {
  public:
    Offset off;
    Item* head;
    unsigned length;

    Bucket() {}
  };

  Bucket* Buckets;

private:
  void insert(Bucket* b, size_t size, Item* E) {
    unsigned idx = E->hash & (size - 1);
    Bucket& B = b[idx];
    E->next = B.head;
    ++B.length;
    B.head = E;
  }

  void resize(size_t newsize) {
    Bucket* newBuckets = (Bucket*) calloc(newsize, sizeof(Bucket));
    // Populate newBuckets with the old entries.
    for (unsigned i = 0; i < NumBuckets; ++i)
      for (Item* E = Buckets[i].head; E ; ) {
        Item* N = E->next;
        E->next = 0;
        insert(newBuckets, newsize, E);
        E = N;
      }

    free(Buckets);
    NumBuckets = newsize;
    Buckets = newBuckets;
  }

public:

  void insert(typename Info::key_type_ref key,
              typename Info::data_type_ref data) {

    ++NumEntries;
    if (4*NumEntries >= 3*NumBuckets) resize(NumBuckets*2);
    insert(Buckets, NumBuckets, new (BA.Allocate<Item>()) Item(key, data));
  }

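  // Emit() serializes the table in two parts.  First it writes the payload:
  // for each non-empty bucket, a 16-bit item count followed by each item's
  // 32-bit hash, the trait-emitted key/data lengths, the key bytes, and the
  // data bytes.  It then pads to a 4-byte boundary and writes the table
  // header: 32-bit NumBuckets, 32-bit NumEntries, and NumBuckets 32-bit
  // bucket offsets (0 for empty buckets, whose Bucket::off is never set).
  // The returned Offset points at this header.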
  Offset Emit(llvm::raw_fd_ostream& out) {
    // Emit the payload of the table.
    for (unsigned i = 0; i < NumBuckets; ++i) {
      Bucket& B = Buckets[i];
      if (!B.head) continue;

      // Store the offset for the data of this bucket.
      B.off = out.tell();

      // Write out the number of items in the bucket.
      Emit16(out, B.length);

      // Write out the entries in the bucket.
      for (Item *I = B.head; I ; I = I->next) {
        Emit32(out, I->hash);
        const std::pair<unsigned, unsigned>& Len =
          Info::EmitKeyDataLength(out, I->key, I->data);
        Info::EmitKey(out, I->key, Len.first);
        Info::EmitData(out, I->key, I->data, Len.second);
      }
    }

    // Emit the hashtable itself.
    Pad(out, 4);
    Offset TableOff = out.tell();
    Emit32(out, NumBuckets);
    Emit32(out, NumEntries);
    for (unsigned i = 0; i < NumBuckets; ++i) Emit32(out, Buckets[i].off);

    return TableOff;
  }

  OnDiskChainedHashTableGenerator() {
    NumEntries = 0;
    NumBuckets = 64;
    // Note that we do not need to run the constructors of the individual
    // Bucket objects since 'calloc' returns bytes that are all 0.
    Buckets = (Bucket*) calloc(NumBuckets, sizeof(Bucket));
  }

  ~OnDiskChainedHashTableGenerator() {
    free(Buckets);
  }
};

//===----------------------------------------------------------------------===//
// PTH-specific stuff.
//===----------------------------------------------------------------------===//

namespace {
class VISIBILITY_HIDDEN PCHEntry {
  Offset TokenData, PPCondData;

public:
  PCHEntry() {}

  PCHEntry(Offset td, Offset ppcd)
    : TokenData(td), PPCondData(ppcd) {}

  Offset getTokenOffset() const { return TokenData; }
  Offset getPPCondTableOffset() const { return PPCondData; }
};

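// FileEntryPCHEntryInfo is the on-disk hashtable trait for the PTH file
// table: keys are null-terminated file names, and the data for each entry is
// the token-data offset, the PPCond-table offset, and the file's stat
// information (inode, device, mode, modification time, size), 34 bytes in
// all.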
class VISIBILITY_HIDDEN FileEntryPCHEntryInfo {
public:
  typedef const FileEntry* key_type;
  typedef key_type key_type_ref;

  typedef PCHEntry data_type;
  typedef const PCHEntry& data_type_ref;

  static unsigned ComputeHash(const FileEntry* FE) {
    return BernsteinHash(FE->getName());
  }

  static std::pair<unsigned,unsigned>
  EmitKeyDataLength(llvm::raw_ostream& Out, const FileEntry* FE,
                    const PCHEntry& E) {

    unsigned n = strlen(FE->getName()) + 1;
    ::Emit16(Out, n);
    return std::make_pair(n, (4*2)+(4+4+2+8+8));
  }

  static void EmitKey(llvm::raw_ostream& Out, const FileEntry* FE, unsigned n) {
    Out.write(FE->getName(), n);
  }

  static void EmitData(llvm::raw_ostream& Out, const FileEntry* FE,
                       const PCHEntry& E, unsigned) {
    ::Emit32(Out, E.getTokenOffset());
    ::Emit32(Out, E.getPPCondTableOffset());
    // Emit stat information.
    ::Emit32(Out, FE->getInode());
    ::Emit32(Out, FE->getDevice());
    ::Emit16(Out, FE->getFileMode());
    ::Emit64(Out, FE->getModificationTime());
    ::Emit64(Out, FE->getSize());
  }
};

class OffsetOpt {
  bool valid;
  Offset off;
public:
  OffsetOpt() : valid(false) {}
  bool hasOffset() const { return valid; }
  Offset getOffset() const { assert(valid); return off; }
  void setOffset(Offset o) { off = o; valid = true; }
};
} // end anonymous namespace

typedef OnDiskChainedHashTableGenerator<FileEntryPCHEntryInfo> PCHMap;
typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;

namespace {
class VISIBILITY_HIDDEN PTHWriter {
  IDMap IM;
  llvm::raw_fd_ostream& Out;
  Preprocessor& PP;
  uint32_t idcount;
  PCHMap PM;
  CachedStrsTy CachedStrs;
  Offset CurStrOffset;
  std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;

  /// Get the persistent id for the given IdentifierInfo*.
  uint32_t ResolveID(const IdentifierInfo* II);

  /// Emit a token to the PTH file.
  void EmitToken(const Token& T);

  void Emit8(uint32_t V) {
    Out << (unsigned char)(V);
  }

  void Emit16(uint32_t V) { ::Emit16(Out, V); }

  void Emit24(uint32_t V) {
    Out << (unsigned char)(V);
    Out << (unsigned char)(V >> 8);
    Out << (unsigned char)(V >> 16);
    assert((V >> 24) == 0);
  }

  void Emit32(uint32_t V) { ::Emit32(Out, V); }

  void EmitBuf(const char* I, const char* E) {
    for ( ; I != E ; ++I) Out << *I;
  }

  /// EmitIdentifierTable - Emits two tables to the PTH file.  The first is
  /// a hashtable mapping from identifier strings to persistent IDs.
  /// The second is a straight table mapping from persistent IDs to string
  /// data (the keys of the first table).
  std::pair<Offset, Offset> EmitIdentifierTable();

  /// EmitFileTable - Emit a table mapping from file name strings to PTH
  /// token data.
  Offset EmitFileTable() { return PM.Emit(Out); }

  PCHEntry LexTokens(Lexer& L);
  Offset EmitCachedSpellings();

public:
  PTHWriter(llvm::raw_fd_ostream& out, Preprocessor& pp)
    : Out(out), PP(pp), idcount(0), CurStrOffset(0) {}

  void GeneratePTH();
};
} // end anonymous namespace

uint32_t PTHWriter::ResolveID(const IdentifierInfo* II) {
  // Null IdentifierInfo's map to the persistent ID 0.
  if (!II)
    return 0;

  IDMap::iterator I = IM.find(II);

  if (I == IM.end()) {
    IM[II] = ++idcount; // Pre-increment since '0' is reserved for NULL.
    return idcount;
  }

  return I->second; // We've already added 1.
}

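// Each token is written as a fixed-size 12-byte record: one 32-bit word
// packing the token kind (low byte), flags (next byte), and length (upper
// 16 bits); one 32-bit word holding either the offset of the literal's
// cached spelling or the persistent identifier ID (0 if neither applies);
// and one 32-bit word with the file offset of the token's location.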
void PTHWriter::EmitToken(const Token& T) {
  Emit32(((uint32_t) T.getKind()) |
         (((uint32_t) T.getFlags()) << 8) |
         (((uint32_t) T.getLength()) << 16));

  // Literals (strings, numbers, characters) get cached spellings.
  if (T.isLiteral()) {
    // FIXME: This uses the slow getSpelling().  Perhaps we can do better
    // in the future?  This only slows down PTH generation.
    const std::string &spelling = PP.getSpelling(T);
    const char* s = spelling.c_str();

    // Get the string entry.
    llvm::StringMapEntry<OffsetOpt> *E =
      &CachedStrs.GetOrCreateValue(s, s+spelling.size());

    if (!E->getValue().hasOffset()) {
      E->getValue().setOffset(CurStrOffset);
      StrEntries.push_back(E);
      CurStrOffset += spelling.size() + 1;
    }

    Emit32(E->getValue().getOffset());
  }
  else
    Emit32(ResolveID(T.getIdentifierInfo()));

  Emit32(PP.getSourceManager().getFileOffset(T.getLocation()));
}

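// LexTokens - Lex the given file with a raw lexer and write its token stream
// to the PTH file.  Alongside the tokens it builds the "PPCond" side table,
// which records one entry per preprocessor conditional ('#if', '#ifdef',
// '#ifndef', '#elif', '#else', '#endif'): the offset of the '#' token
// (relative to the start of the token data) and the index of the entry that
// closes the block (0 for '#endif').  The returned PCHEntry holds the file
// offsets of the token data and of this table.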
PCHEntry PTHWriter::LexTokens(Lexer& L) {
  // Pad with 0's so that we emit tokens at a 4-byte alignment.
  // This speeds up reading them back in.
  Pad(Out, 4);
  Offset off = (Offset) Out.tell();

  // Keep track of matching '#if' ... '#endif'.
  typedef std::vector<std::pair<Offset, unsigned> > PPCondTable;
  PPCondTable PPCond;
  std::vector<unsigned> PPStartCond;
  bool ParsingPreprocessorDirective = false;
  Token Tok;

  do {
    L.LexFromRawLexer(Tok);
  NextToken:
    if ((Tok.isAtStartOfLine() || Tok.is(tok::eof)) &&
        ParsingPreprocessorDirective) {
      // Insert an eom token into the token cache.  It has the same
      // position as the next token that is not on the same line as the
      // preprocessor directive.  Observe that we continue processing
      // 'Tok' when we exit this branch.
      Token Tmp = Tok;
      Tmp.setKind(tok::eom);
      Tmp.clearFlag(Token::StartOfLine);
      Tmp.setIdentifierInfo(0);
      EmitToken(Tmp);
      ParsingPreprocessorDirective = false;
    }

    if (Tok.is(tok::identifier)) {
      Tok.setIdentifierInfo(PP.LookUpIdentifierInfo(Tok));
      EmitToken(Tok);
      continue;
    }

    if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) {
      // Special processing for a preprocessor directive: store the '#' token
      // and lex the next token.
      assert(!ParsingPreprocessorDirective);
      Offset HashOff = (Offset) Out.tell();
      EmitToken(Tok);

      // Get the next token.
      L.LexFromRawLexer(Tok);

      assert(!Tok.isAtStartOfLine());

      // Did we see 'include'/'import'/'include_next'?
      if (!Tok.is(tok::identifier)) {
        EmitToken(Tok);
        continue;
      }

      IdentifierInfo* II = PP.LookUpIdentifierInfo(Tok);
      Tok.setIdentifierInfo(II);
      tok::PPKeywordKind K = II->getPPKeywordID();

      assert(K != tok::pp_not_keyword);
      ParsingPreprocessorDirective = true;

      switch (K) {
      default:
        break;
      case tok::pp_include:
      case tok::pp_import:
      case tok::pp_include_next: {
        // Save the 'include' token.
        EmitToken(Tok);
        // Lex the next token as an include string.
        L.setParsingPreprocessorDirective(true);
        L.LexIncludeFilename(Tok);
        L.setParsingPreprocessorDirective(false);
        assert(!Tok.isAtStartOfLine());
        if (Tok.is(tok::identifier))
          Tok.setIdentifierInfo(PP.LookUpIdentifierInfo(Tok));

        break;
      }
      case tok::pp_if:
      case tok::pp_ifdef:
      case tok::pp_ifndef: {
        // Add an entry for '#if' and friends.  We initially set the target
        // index to 0.  This will get backpatched when we hit #endif.
        PPStartCond.push_back(PPCond.size());
        PPCond.push_back(std::make_pair(HashOff, 0U));
        break;
      }
      case tok::pp_endif: {
        // Add an entry for '#endif'.  We set the target table index to
        // itself.  This will later be set to zero when emitting to the PTH
        // file.  We use 0 for uninitialized indices because that is easier
        // to debug.
        unsigned index = PPCond.size();
        // Backpatch the opening '#if' entry.
        assert(!PPStartCond.empty());
        assert(PPCond.size() > PPStartCond.back());
        assert(PPCond[PPStartCond.back()].second == 0);
        PPCond[PPStartCond.back()].second = index;
        PPStartCond.pop_back();
        // Add the new entry to PPCond.
        PPCond.push_back(std::make_pair(HashOff, index));
        EmitToken(Tok);

        // Some files have gibberish on the same line as '#endif'.
        // Discard these tokens.
        do L.LexFromRawLexer(Tok); while (!Tok.is(tok::eof) &&
                                          !Tok.isAtStartOfLine());
        // We have the next token in hand.
        // Don't immediately lex the next one.
        goto NextToken;
      }
      case tok::pp_elif:
      case tok::pp_else: {
        // Add an entry for #elif or #else.
        // This serves as both a closing and opening of a conditional block.
        // This means that its entry will get backpatched later.
        unsigned index = PPCond.size();
        // Backpatch the previous '#if' entry.
        assert(!PPStartCond.empty());
        assert(PPCond.size() > PPStartCond.back());
        assert(PPCond[PPStartCond.back()].second == 0);
        PPCond[PPStartCond.back()].second = index;
        PPStartCond.pop_back();
        // Now add '#elif' as a new block opening.
        PPCond.push_back(std::make_pair(HashOff, 0U));
        PPStartCond.push_back(index);
        break;
      }
      }
    }

    EmitToken(Tok);
  }
  while (Tok.isNot(tok::eof));

  assert(PPStartCond.empty() && "Error: imbalanced preprocessor conditionals.");

  // Next write out PPCond.
  Offset PPCondOff = (Offset) Out.tell();

  // Write out the size of PPCond so that clients can identify empty tables.
  Emit32(PPCond.size());

  for (unsigned i = 0, e = PPCond.size(); i!=e; ++i) {
    Emit32(PPCond[i].first - off);
    uint32_t x = PPCond[i].second;
    assert(x != 0 && "PPCond entry not backpatched.");
    // Emit zero for #endifs.  This allows us to do checking when
    // we read the PTH file back in.
    Emit32(x == i ? 0 : x);
  }

  return PCHEntry(off, PPCondOff);
}

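// EmitCachedSpellings - Write the cached literal spellings collected by
// EmitToken as one blob of null-terminated strings, in the order they were
// first seen.  Tokens reference spellings by their offset into this blob.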
Offset PTHWriter::EmitCachedSpellings() {
  // Write the cached strings to the PTH file.
  Offset SpellingsOff = Out.tell();

  for (std::vector<llvm::StringMapEntry<OffsetOpt>*>::iterator
       I = StrEntries.begin(), E = StrEntries.end(); I!=E; ++I) {

    const char* data = (*I)->getKeyData();
    EmitBuf(data, data + (*I)->getKeyLength());
    Emit8('\0');
  }

  return SpellingsOff;
}

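// GeneratePTH - Drive PTH generation.  The resulting file starts with the
// "cfe-pth" magic string and a version number, followed by a four-word
// prologue that is backpatched at the end with the offsets of the two
// identifier tables (persistent-ID index and string hashtable), the file
// table, and the cached-spellings blob.  The per-file token data and these
// tables follow.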
void PTHWriter::GeneratePTH() {
  // Generate the prologue.
  Out << "cfe-pth";
  Emit32(PTHManager::Version);

  // Leave 4 words for the prologue.
  Offset PrologueOffset = Out.tell();
  for (unsigned i = 0; i < 4 * sizeof(uint32_t); ++i) Emit8(0);

  // Iterate over all the files in SourceManager.  Create a lexer
  // for each file and cache the tokens.
  SourceManager &SM = PP.getSourceManager();
  const LangOptions &LOpts = PP.getLangOptions();

  for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
       E = SM.fileinfo_end(); I != E; ++I) {
    const SrcMgr::ContentCache &C = *I->second;
    const FileEntry *FE = C.Entry;

    // FIXME: Handle files with non-absolute paths.
    llvm::sys::Path P(FE->getName());
    if (!P.isAbsolute())
      continue;

    const llvm::MemoryBuffer *B = C.getBuffer();
    if (!B) continue;

    FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
    Lexer L(FID, SM, LOpts);
    PM.insert(FE, LexTokens(L));
  }

  // Write out the identifier table.
  const std::pair<Offset,Offset>& IdTableOff = EmitIdentifierTable();

  // Write out the cached strings table.
  Offset SpellingOff = EmitCachedSpellings();

  // Write out the file table.
  Offset FileTableOff = EmitFileTable();

  // Finally, write the prologue.
  Out.seek(PrologueOffset);
  Emit32(IdTableOff.first);
  Emit32(IdTableOff.second);
  Emit32(FileTableOff);
  Emit32(SpellingOff);
}

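// CacheTokens is the external entry point for PTH generation.  A minimal
// usage sketch (assuming a fully initialized Preprocessor 'PP' for the main
// source file):
//
//   clang::CacheTokens(PP, "foo.c.pth");   // hypothetical output file name
//
// The preprocessor is run over the whole translation unit first so that every
// included file is registered with the SourceManager before tokens are cached.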
void clang::CacheTokens(Preprocessor& PP, const std::string& OutFile) {
  // Lex through the entire file.  This will populate SourceManager with
  // all of the header information.
  Token Tok;
  PP.EnterMainSourceFile();
  do { PP.Lex(Tok); } while (Tok.isNot(tok::eof));

  // Open up the PTH file.
  std::string ErrMsg;
  llvm::raw_fd_ostream Out(OutFile.c_str(), true, ErrMsg);

  if (!ErrMsg.empty()) {
    llvm::errs() << "PTH error: " << ErrMsg << "\n";
    return;
  }

  // Create the PTHWriter and generate the PTH file.
  PTHWriter PW(Out, PP);
  PW.GeneratePTH();
}

//===----------------------------------------------------------------------===//

namespace {
class VISIBILITY_HIDDEN PCHIdKey {
public:
  const IdentifierInfo* II;
  uint32_t FileOffset;
};

class VISIBILITY_HIDDEN PCHIdentifierTableTrait {
public:
  typedef PCHIdKey* key_type;
  typedef key_type key_type_ref;

  typedef uint32_t data_type;
  typedef data_type data_type_ref;

  static unsigned ComputeHash(PCHIdKey* key) {
    return BernsteinHash(key->II->getName());
  }

  static std::pair<unsigned,unsigned>
  EmitKeyDataLength(llvm::raw_ostream& Out, const PCHIdKey* key, uint32_t) {
    unsigned n = strlen(key->II->getName()) + 1;
    ::Emit16(Out, n);
    return std::make_pair(n, sizeof(uint32_t));
  }

  static void EmitKey(llvm::raw_fd_ostream& Out, PCHIdKey* key, unsigned n) {
    // Record the location of the key data.  This is used when generating
    // the mapping from persistent IDs to strings.
    key->FileOffset = Out.tell();
    Out.write(key->II->getName(), n);
  }

  static void EmitData(llvm::raw_ostream& Out, PCHIdKey*, uint32_t pID,
                       unsigned) {
    ::Emit32(Out, pID);
  }
};
} // end anonymous namespace

/// EmitIdentifierTable - Emits two tables to the PTH file.  The first is
/// a hashtable mapping from identifier strings to persistent IDs.  The second
/// is a straight table mapping from persistent IDs to string data (the
/// keys of the first table).
///
std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
  // Build two maps:
  //  (1) an inverse map from persistent IDs -> (IdentifierInfo*,Offset)
  //  (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs

  // Note that we use 'calloc', so all the bytes are 0.
  PCHIdKey* IIDMap = (PCHIdKey*) calloc(idcount, sizeof(PCHIdKey));

  // Create the hashtable.
  OnDiskChainedHashTableGenerator<PCHIdentifierTableTrait> IIOffMap;

  // Generate mapping from persistent IDs -> IdentifierInfo*.
  for (IDMap::iterator I=IM.begin(), E=IM.end(); I!=E; ++I) {
    // Decrement by 1 because we are using a vector for the lookup and
    // 0 is reserved for NULL.
    assert(I->second > 0);
    assert(I->second-1 < idcount);
    unsigned idx = I->second-1;

    // Store the mapping from persistent ID to IdentifierInfo*.
    IIDMap[idx].II = I->first;

    // Store the reverse mapping in a hashtable.
    IIOffMap.insert(&IIDMap[idx], I->second);
  }

  // Write out the inverse map first.  This causes the PCHIdKey entries to
  // record PTH file offsets for the string data.  This is used to write
  // the second table.
  Offset StringTableOffset = IIOffMap.Emit(Out);

  // Now emit the table mapping from persistent IDs to PTH file offsets.
  Offset IDOff = Out.tell();
  Emit32(idcount);  // Emit the number of identifiers.
  for (unsigned i = 0 ; i < idcount; ++i) Emit32(IIDMap[i].FileOffset);

  // Finally, release the inverse map.
  free(IIDMap);

  return std::make_pair(IDOff, StringTableOffset);
}