commit
50bc25d3c7
File diff suppressed because it is too large
@@ -30,6 +30,7 @@
#include "KeyBackedTypes.h"
|
||||
#include <ctime>
|
||||
#include <climits>
|
||||
#include "BackupContainer.h"
|
||||
|
||||
class BackupAgentBase : NonCopyable {
|
||||
public:
|
||||
|
@@ -242,9 +243,9 @@ public:
|
||||
/** BACKUP METHODS **/
|
||||
|
||||
Future<Void> submitBackup(Reference<ReadYourWritesTransaction> tr, Key outContainer, std::string tagName, Standalone<VectorRef<KeyRangeRef>> backupRanges, bool stopWhenDone = true);
|
||||
Future<Void> submitBackup(Database cx, Key outContainer, std::string tagName, Standalone<VectorRef<KeyRangeRef>> backupRanges, bool stopWhenDone = true) {
|
||||
return runRYWTransactionFailIfLocked(cx, [=](Reference<ReadYourWritesTransaction> tr){ return submitBackup(tr, outContainer, tagName, backupRanges, stopWhenDone); });
|
||||
Future<Void> submitBackup(Reference<ReadYourWritesTransaction> tr, Key outContainer, int snapshotIntervalSeconds, std::string tagName, Standalone<VectorRef<KeyRangeRef>> backupRanges, bool stopWhenDone = true);
|
||||
Future<Void> submitBackup(Database cx, Key outContainer, int snapshotIntervalSeconds, std::string tagName, Standalone<VectorRef<KeyRangeRef>> backupRanges, bool stopWhenDone = true) {
|
||||
return runRYWTransactionFailIfLocked(cx, [=](Reference<ReadYourWritesTransaction> tr){ return submitBackup(tr, outContainer, snapshotIntervalSeconds, tagName, backupRanges, stopWhenDone); });
|
||||
}
|
||||
|
||||
Future<Void> discontinueBackup(Reference<ReadYourWritesTransaction> tr, Key tagName);
|
||||
|
@@ -260,6 +261,7 @@ public:
Future<std::string> getStatus(Database cx, int errorLimit, std::string tagName);
|
||||
|
||||
Future<Version> getLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName);
|
||||
void setLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName, Version version);
|
||||
|
||||
// stopWhenDone will return when the backup is stopped, if enabled. Otherwise, it
|
||||
// will return when the backup directory is restorable.
|
||||
|
@@ -267,14 +269,6 @@ public:
|
||||
static Future<std::string> getBackupInfo(std::string backupContainer, Version* defaultVersion = NULL);
|
||||
|
||||
static std::string getTempFilename();
|
||||
// Data(key ranges) and Log files will have their file size in the name because it is not at all convenient
|
||||
// to fetch filesizes from either of the current BackupContainer implementations. LocalDirectory requires
|
||||
// querying each file separately, and Blob Store doesn't support renames so the apparent log and data files
|
||||
// are actually a kind of symbolic link so to get the size of the final file it would have to be read.
|
||||
static std::string getDataFilename(Version version, int64_t size, int blockSize);
|
||||
static std::string getLogFilename(Version beginVer, Version endVer, int64_t size, int blockSize);
|
||||
|
||||
static const Key keyLastRestorable;
|
||||
|
||||
Future<int64_t> getTaskCount(Reference<ReadYourWritesTransaction> tr) { return taskBucket->getTaskCount(tr); }
|
||||
|
@@ -421,31 +415,10 @@ Future<Void> logErrorWorker(Reference<ReadYourWritesTransaction> const& tr, Key
Future<Void> logError(Database cx, Key keyErrors, const std::string& message);
|
||||
Future<Void> logError(Reference<ReadYourWritesTransaction> tr, Key keyErrors, const std::string& message);
|
||||
Future<Void> checkVersion(Reference<ReadYourWritesTransaction> const& tr);
|
||||
Future<Void> readCommitted(Database const& cx, PromiseStream<RangeResultWithVersion> const& results, Reference<FlowLock> const& lock, KeyRangeRef const& range, bool const& terminator = false, bool const& systemAccess = false, bool const& lockAware = false);
|
||||
Future<Void> readCommitted(Database const& cx, PromiseStream<RCGroup> const& results, Future<Void> const& active, Reference<FlowLock> const& lock, KeyRangeRef const& range, std::function< std::pair<uint64_t, uint32_t>(Key key) > const& groupBy, bool const& terminator = false, bool const& systemAccess = false, bool const& lockAware = false, std::function< Future<Void>(Reference<ReadYourWritesTransaction> tr) > const& withEachFunction = nullptr);
|
||||
Future<Void> readCommitted(Database const& cx, PromiseStream<RangeResultWithVersion> const& results, Reference<FlowLock> const& lock, KeyRangeRef const& range, bool const& terminator = true, bool const& systemAccess = false, bool const& lockAware = false);
|
||||
Future<Void> readCommitted(Database const& cx, PromiseStream<RCGroup> const& results, Future<Void> const& active, Reference<FlowLock> const& lock, KeyRangeRef const& range, std::function< std::pair<uint64_t, uint32_t>(Key key) > const& groupBy, bool const& terminator = true, bool const& systemAccess = false, bool const& lockAware = false, std::function< Future<Void>(Reference<ReadYourWritesTransaction> tr) > const& withEachFunction = nullptr);
|
||||
Future<Void> applyMutations(Database const& cx, Key const& uid, Key const& addPrefix, Key const& removePrefix, Version const& beginVersion, Version* const& endVersion, RequestStream<CommitTransactionRequest> const& commit, NotifiedVersion* const& committedVersion, Reference<KeyRangeMap<Version>> const& keyVersion);
|
||||
|
||||
template <typename T>
|
||||
class TaskParam {
|
||||
public:
|
||||
TaskParam(StringRef key) : key(key) {}
|
||||
T get(Reference<Task> task) const {
|
||||
return Codec<T>::unpack(Tuple::unpack(task->params[key]));
|
||||
}
|
||||
void set(Reference<Task> task, T const &val) const {
|
||||
task->params[key] = Codec<T>::pack(val).pack();
|
||||
}
|
||||
bool exists(Reference<Task> task) const {
|
||||
return task->params.find(key) != task->params.end();
|
||||
}
|
||||
T getOrDefault(Reference<Task> task, const T defaultValue = T()) const {
|
||||
if(!exists(task))
|
||||
return defaultValue;
|
||||
return get(task);
|
||||
}
|
||||
StringRef key;
|
||||
};
|
||||
|
||||
typedef BackupAgentBase::enumState EBackupState;
|
||||
template<> inline Tuple Codec<EBackupState>::pack(EBackupState const &val) { return Tuple().append(val); }
|
||||
template<> inline EBackupState Codec<EBackupState>::unpack(Tuple const &val) { return (EBackupState)val.getInt(0); }
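TaskParam<T> above gives typed access to a single entry in Task::params by tuple-packing values through Codec<T>. A minimal usage sketch, not part of the commit (the parameter accessor and function names are hypothetical), following the usual idiom of exposing parameters as small static accessors:

namespace ExampleTaskParams {
    static TaskParam<Version> beginVersion() { return LiteralStringRef(__FUNCTION__); }
}

void setAndReadExampleParam(Reference<Task> task, Version begin) {
    ExampleTaskParams::beginVersion().set(task, begin);               // stored as a packed Tuple in task->params
    Version v = ExampleTaskParams::beginVersion().getOrDefault(task); // would return Version() if the key were absent
    ASSERT(v == begin);
}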
@@ -562,18 +535,105 @@ protected:
Subspace configSpace;
|
||||
};
|
||||
|
||||
template<> inline Tuple Codec<Reference<IBackupContainer>>::pack(Reference<IBackupContainer> const &bc) {
|
||||
return Tuple().append(StringRef(bc->getURL()));
|
||||
}
|
||||
template<> inline Reference<IBackupContainer> Codec<Reference<IBackupContainer>>::unpack(Tuple const &val) {
|
||||
return IBackupContainer::openContainer(val.getString(0).toString());
|
||||
}
|
||||
|
||||
class BackupConfig : public KeyBackedConfig {
|
||||
public:
|
||||
BackupConfig(UID uid = UID()) : KeyBackedConfig(fileBackupPrefixRange.begin, uid) {}
|
||||
BackupConfig(Reference<Task> task) : KeyBackedConfig(fileBackupPrefixRange.begin, task) {}
|
||||
|
||||
// rangeFileMap maps a keyrange file's End to its Begin and Filename
|
||||
typedef std::pair<Key, Key> KeyAndFilenameT;
|
||||
typedef KeyBackedMap<Key, KeyAndFilenameT> RangeFileMapT;
|
||||
RangeFileMapT rangeFileMap() {
|
||||
struct RangeSlice {
|
||||
Key begin;
|
||||
Version version;
|
||||
std::string fileName;
|
||||
int64_t fileSize;
|
||||
Tuple pack() const {
|
||||
return Tuple().append(begin).append(version).append(StringRef(fileName)).append(fileSize);
|
||||
}
|
||||
static RangeSlice unpack(Tuple const &t) {
|
||||
RangeSlice r;
|
||||
int i = 0;
|
||||
r.begin = t.getString(i++);
|
||||
r.version = t.getInt(i++);
|
||||
r.fileName = t.getString(i++).toString();
|
||||
r.fileSize = t.getInt(i++);
|
||||
return r;
|
||||
}
|
||||
};
|
||||
|
||||
// Map of range end boundaries to info about the backup file written for that range.
|
||||
typedef KeyBackedMap<Key, RangeSlice> RangeFileMapT;
|
||||
RangeFileMapT snapshotRangeFileMap() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
// Coalesced set of ranges already dispatched for writing.
|
||||
typedef KeyBackedMap<Key, bool> RangeDispatchMapT;
|
||||
RangeDispatchMapT snapshotRangeDispatchMap() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
// Interval to use for determining the target end version for new snapshots
|
||||
KeyBackedProperty<int64_t> snapshotIntervalSeconds() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
// When the current snapshot began
|
||||
KeyBackedProperty<Version> snapshotBeginVersion() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
// When the current snapshot is desired to end.
|
||||
// This can be changed at runtime to speed up or slow down a snapshot
|
||||
KeyBackedProperty<Version> snapshotTargetEndVersion() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
KeyBackedProperty<int64_t> snapshotBatchSize() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
KeyBackedProperty<Key> snapshotBatchFuture() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
KeyBackedProperty<Key> snapshotBatchDispatchDoneKey() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
Future<Void> initNewSnapshot(Reference<ReadYourWritesTransaction> tr, int64_t intervalSeconds = -1) {
|
||||
BackupConfig &copy = *this; // Capture this by value instead of this ptr
|
||||
Future<Version> beginVersion = tr->getReadVersion();
|
||||
Future<int64_t> defaultInterval = 0;
|
||||
if(intervalSeconds < 0)
|
||||
defaultInterval = copy.snapshotIntervalSeconds().getOrThrow(tr);
|
||||
|
||||
// Make sure read version and possibly the snapshot interval value are ready, then clear/init the snapshot config members
|
||||
return map(success(beginVersion) && success(defaultInterval), [=](Void) mutable {
|
||||
copy.snapshotRangeFileMap().clear(tr);
|
||||
copy.snapshotRangeDispatchMap().clear(tr);
|
||||
copy.snapshotBatchSize().clear(tr);
|
||||
copy.snapshotBatchFuture().clear(tr);
|
||||
copy.snapshotBatchDispatchDoneKey().clear(tr);
|
||||
|
||||
if(intervalSeconds < 0)
|
||||
intervalSeconds = defaultInterval.get();
|
||||
Version endVersion = beginVersion.get() + intervalSeconds * CLIENT_KNOBS->CORE_VERSIONSPERSECOND;
|
||||
|
||||
copy.snapshotBeginVersion().set(tr, beginVersion.get());
|
||||
copy.snapshotTargetEndVersion().set(tr, endVersion);
|
||||
|
||||
return Void();
|
||||
});
|
||||
}
|
||||
|
||||
KeyBackedBinaryValue<int64_t> rangeBytesWritten() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
@@ -586,19 +646,42 @@ public:
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
KeyBackedProperty<std::string> backupContainer() {
|
||||
KeyBackedProperty<Reference<IBackupContainer>> backupContainer() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
// Get the backup container URL only without creating a backup container instance.
|
||||
KeyBackedProperty<Reference<IBackupContainer>> backupContainerURL() {
|
||||
return configSpace.pack(LiteralStringRef("backupContainer"));
|
||||
}
|
||||
|
||||
// Stop differential logging if already started or don't start after completing KV ranges
KeyBackedProperty<bool> stopWhenDone() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
KeyBackedProperty<Version> stopVersion() {
|
||||
// Latest version for which all prior versions have had their log copy tasks completed
|
||||
KeyBackedProperty<Version> latestLogEndVersion() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
// The end version of the last complete snapshot
|
||||
KeyBackedProperty<Version> latestSnapshotEndVersion() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
||||
Future<Optional<Version>> getLatestRestorableVersion(Reference<ReadYourWritesTransaction> tr) {
|
||||
auto &copy = *this;
auto lastLog = latestLogEndVersion().get(tr);
|
||||
auto lastSnapshot = latestSnapshotEndVersion().get(tr);
|
||||
return map(success(lastLog) && success(lastSnapshot), [=](Void) -> Optional<Version> {
|
||||
if(lastLog.get().present() && lastSnapshot.get().present()
|
||||
&& lastLog.get().get() >= lastSnapshot.get().get())
|
||||
return lastLog.get().get();
|
||||
return {};
|
||||
});
|
||||
}
|
||||
|
||||
KeyBackedProperty<std::vector<KeyRange>> backupRanges() {
|
||||
return configSpace.pack(LiteralStringRef(__FUNCTION__));
|
||||
}
|
||||
|
@@ -613,7 +696,11 @@ public:
TraceEvent(SevError, "FileBackupErrorNoUID").error(e).detail("Description", details);
|
||||
return Void();
|
||||
}
|
||||
TraceEvent(SevWarn, "FileBackupError").error(e).detail("BackupUID", uid).detail("Description", details).detail("TaskInstance", (uint64_t)taskInstance);
|
||||
TraceEvent t(SevWarn, "FileBackupError");
|
||||
t.error(e).detail("BackupUID", uid).detail("Description", details).detail("TaskInstance", (uint64_t)taskInstance);
|
||||
// These should not happen
|
||||
if(e.code() == error_code_key_not_found)
|
||||
t.backtrace();
|
||||
std::string msg = format("ERROR: %s %s", e.what(), details.c_str());
|
||||
return lastError().set(cx, {msg, (int64_t)now()});
|
||||
}
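Each BackupConfig accessor above derives its key from the accessor's own name (via __FUNCTION__) under configSpace, so properties are read and written like any other KeyBacked type inside a transaction. A rough sketch of adjusting the snapshot interval, not taken from the commit (the actor name is hypothetical):

ACTOR Future<Void> shortenSnapshotInterval(Reference<ReadYourWritesTransaction> tr, UID backupUid) {
    state BackupConfig config(backupUid);
    tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
    tr->setOption(FDBTransactionOptions::LOCK_AWARE);

    // Read the configured interval, halve it, and restart the snapshot with the new target.
    state int64_t interval = wait(config.snapshotIntervalSeconds().getOrThrow(tr));
    config.snapshotIntervalSeconds().set(tr, interval / 2);
    Void _ = wait(config.initNewSnapshot(tr, interval / 2));

    return Void();
}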
@@ -364,7 +364,8 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RangeResultWithVersi
begin = firstGreaterThan(values.end()[-1].key);
|
||||
|
||||
if (!values.more && !limits.isReached()) {
|
||||
results.sendError(end_of_stream());
|
||||
if(terminator)
|
||||
results.sendError(end_of_stream());
|
||||
return Void();
|
||||
}
|
||||
}
|
||||
|
@@ -456,7 +457,8 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RCGroup> results, Fu
results.send(rcGroup);
|
||||
}
|
||||
|
||||
results.sendError(end_of_stream());
|
||||
if(terminator)
|
||||
results.sendError(end_of_stream());
|
||||
return Void();
|
||||
}
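The terminator flag added to readCommitted above lets several readers share one PromiseStream: each reader skips sending end_of_stream, and the caller sends it once after all readers are done (the CopyLogRangeTaskFunc change later in this commit uses it that way). A minimal sketch, not part of the diff:

ACTOR Future<Void> readRangesIntoOneStream(Database cx, PromiseStream<RangeResultWithVersion> results,
                                           Reference<FlowLock> lock, Standalone<VectorRef<KeyRangeRef>> ranges) {
    state std::vector<Future<Void>> readers;
    for (int i = 0; i < ranges.size(); ++i) {
        // terminator = false, systemAccess = true, lockAware = true
        readers.push_back(readCommitted(cx, results, lock, ranges[i], false, true, true));
    }
    Void _ = wait(waitForAll(readers));
    results.sendError(end_of_stream());  // single terminator for the merged stream
    return Void();
}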
File diff suppressed because it is too large
@@ -22,72 +22,162 @@
|
||||
#include "flow/flow.h"
|
||||
#include "fdbrpc/IAsyncFile.h"
|
||||
#include "fdbrpc/BlobStore.h"
|
||||
#include "FDBTypes.h"
|
||||
#include <vector>
|
||||
|
||||
// Class representing a container for backup files, such as a mounted directory or a remote filesystem.
|
||||
// Append-only file interface for writing backup data
|
||||
// Once finish() is called the file cannot be further written to.
|
||||
// Backup containers should not attempt to use files for which finish was not called or did not complete.
|
||||
// TODO: Move the log file and range file format encoding/decoding stuff to this file and behind interfaces.
|
||||
class IBackupFile {
|
||||
public:
|
||||
IBackupFile(std::string fileName) : m_fileName(fileName), m_offset(0) {}
|
||||
virtual ~IBackupFile() {}
|
||||
// Backup files are append-only and cannot have more than 1 append outstanding at once.
|
||||
virtual Future<Void> append(const void *data, int len) = 0;
|
||||
virtual Future<Void> finish() = 0;
|
||||
inline std::string getFileName() const {
|
||||
return m_fileName;
|
||||
}
|
||||
inline int64_t size() const {
|
||||
return m_offset;
|
||||
}
|
||||
virtual void addref() = 0;
|
||||
virtual void delref() = 0;
|
||||
|
||||
Future<Void> appendString(Standalone<StringRef> s);
|
||||
protected:
|
||||
std::string m_fileName;
|
||||
int64_t m_offset;
|
||||
};
|
||||
|
||||
// Structures for various backup components
|
||||
|
||||
struct LogFile {
|
||||
Version beginVersion;
|
||||
Version endVersion;
|
||||
uint32_t blockSize;
|
||||
std::string fileName;
|
||||
int64_t fileSize;
|
||||
|
||||
// Order by beginVersion, break ties with endVersion
|
||||
bool operator< (const LogFile &rhs) const {
|
||||
return beginVersion == rhs.beginVersion ? endVersion < rhs.endVersion : beginVersion < rhs.beginVersion;
|
||||
}
|
||||
};
|
||||
|
||||
struct RangeFile {
|
||||
Version version;
|
||||
uint32_t blockSize;
|
||||
std::string fileName;
|
||||
int64_t fileSize;
|
||||
|
||||
// Order by version, break ties with name
|
||||
bool operator< (const RangeFile &rhs) const {
|
||||
return version == rhs.version ? fileName < rhs.fileName : version < rhs.version;
|
||||
}
|
||||
};
|
||||
|
||||
struct KeyspaceSnapshotFile {
|
||||
Version beginVersion;
|
||||
Version endVersion;
|
||||
std::string fileName;
|
||||
int64_t totalSize;
|
||||
|
||||
// Order by beginVersion, break ties with endVersion
|
||||
bool operator< (const KeyspaceSnapshotFile &rhs) const {
|
||||
return beginVersion == rhs.beginVersion ? endVersion < rhs.endVersion : beginVersion < rhs.beginVersion;
|
||||
}
|
||||
};
|
||||
|
||||
struct FullBackupListing {
|
||||
std::vector<RangeFile> ranges;
|
||||
std::vector<LogFile> logs;
|
||||
std::vector<KeyspaceSnapshotFile> snapshots;
|
||||
};
|
||||
|
||||
struct BackupDescription {
|
||||
std::string url;
|
||||
std::vector<KeyspaceSnapshotFile> snapshots;
|
||||
Optional<Version> minLogBegin;
|
||||
Optional<Version> maxLogEnd;
|
||||
Optional<Version> contiguousLogEnd;
|
||||
Optional<Version> maxRestorableVersion;
|
||||
Optional<Version> minRestorableVersion;
|
||||
std::string extendedDetail; // Freeform container-specific info.
|
||||
std::string toString() const;
|
||||
};
|
||||
|
||||
struct RestorableFileSet {
|
||||
Version targetVersion;
|
||||
std::vector<LogFile> logs;
|
||||
std::vector<RangeFile> ranges;
|
||||
KeyspaceSnapshotFile snapshot;
|
||||
};
|
||||
|
||||
/* IBackupContainer is an interface to a set of backup data, which contains
|
||||
* - backup metadata
|
||||
* - log files
|
||||
* - range files
|
||||
* - keyspace snapshot files defining a complete non overlapping key space snapshot
|
||||
*
|
||||
* Files in a container are identified by a name. This can be any string, whatever
|
||||
* makes sense for the underlying storage system.
|
||||
*
|
||||
* Reading files is done by file name. File names are discovered by getting a RestorableFileSet.
|
||||
*
|
||||
* For remote data stores that are filesystem-like, it's probably best to inherit BackupContainerFileSystem.
|
||||
*/
|
||||
class IBackupContainer {
|
||||
public:
|
||||
virtual void addref() = 0;
|
||||
virtual void delref() = 0;
|
||||
|
||||
enum EMode { READONLY, WRITEONLY };
|
||||
|
||||
static std::vector<std::string> getURLFormats();
|
||||
|
||||
IBackupContainer() {}
|
||||
virtual ~IBackupContainer() {}
|
||||
|
||||
// Create the container (if necessary)
|
||||
// Create the container
|
||||
virtual Future<Void> create() = 0;
|
||||
|
||||
// Open a named file in the container for reading (restore mode) or writing (backup mode)
|
||||
virtual Future<Reference<IAsyncFile>> openFile(std::string name, EMode mode) = 0;
|
||||
// Open a log file or range file for writing
|
||||
virtual Future<Reference<IBackupFile>> writeLogFile(Version beginVersion, Version endVersion, int blockSize) = 0;
|
||||
virtual Future<Reference<IBackupFile>> writeRangeFile(Version version, int blockSize) = 0;
|
||||
|
||||
// Returns whether or not a file exists in the container
|
||||
virtual Future<bool> fileExists(std::string name) = 0;
|
||||
// Write a KeyspaceSnapshotFile of range file names representing a full non overlapping
|
||||
// snapshot of the key ranges this backup is targeting.
|
||||
virtual Future<Void> writeKeyspaceSnapshotFile(std::vector<std::string> fileNames, int64_t totalBytes) = 0;
|
||||
|
||||
// Get a list of backup files in the container
|
||||
virtual Future<std::vector<std::string>> listFiles() = 0;
|
||||
// Open a file for read by name
|
||||
virtual Future<Reference<IAsyncFile>> readFile(std::string name) = 0;
|
||||
|
||||
// Rename a file
|
||||
virtual Future<Void> renameFile(std::string from, std::string to) = 0;
|
||||
// Delete all data up to (but not including) endVersion
virtual Future<Void> expireData(Version endVersion) = 0;
|
||||
|
||||
// Delete entire container. During the process, if pNumDeleted is not null it will be
|
||||
// updated with the count of deleted files so that progress can be seen.
|
||||
virtual Future<Void> deleteContainer(int *pNumDeleted = nullptr) = 0;
|
||||
|
||||
// Uses the virtual methods to describe the backup contents
|
||||
virtual Future<BackupDescription> describeBackup() = 0;
|
||||
|
||||
virtual Future<FullBackupListing> listBackup() = 0;
|
||||
|
||||
// Get exactly the files necessary to restore to targetVersion. Returns non-present if
|
||||
// restore to given version is not possible.
|
||||
virtual Future<Optional<RestorableFileSet>> getRestoreSet(Version targetVersion) = 0;
|
||||
|
||||
// Get an IBackupContainer based on a container spec string
|
||||
static Reference<IBackupContainer> openContainer(std::string url, std::string *error = nullptr);
|
||||
};
|
||||
|
||||
class BackupContainerBlobStore : public IBackupContainer, ReferenceCounted<BackupContainerBlobStore> {
|
||||
public:
|
||||
void addref() { return ReferenceCounted<BackupContainerBlobStore>::addref(); }
|
||||
void delref() { return ReferenceCounted<BackupContainerBlobStore>::delref(); }
|
||||
static const std::string META_BUCKET;
|
||||
|
||||
static std::string getURLFormat() { return BlobStoreEndpoint::getURLFormat(true); }
|
||||
static Future<std::vector<std::string>> listBackupContainers(Reference<BlobStoreEndpoint> const &bs);
|
||||
|
||||
BackupContainerBlobStore(Reference<BlobStoreEndpoint> bstore, std::string name)
|
||||
: m_bstore(bstore), m_bucketPrefix(name) {}
|
||||
|
||||
virtual ~BackupContainerBlobStore() { m_bucketCount.cancel(); }
|
||||
|
||||
// IBackupContainer methods
|
||||
Future<Void> create();
|
||||
Future<Reference<IAsyncFile>> openFile(std::string name, EMode mode);
|
||||
Future<bool> fileExists(std::string name);
|
||||
Future<Void> renameFile(std::string from, std::string to);
|
||||
Future<std::vector<std::string>> listFiles();
|
||||
Future<Void> listFilesStream(PromiseStream<BlobStoreEndpoint::ObjectInfo> results);
|
||||
|
||||
Future<Void> deleteContainer(int *pNumDeleted = NULL);
|
||||
Future<std::string> containerInfo();
|
||||
Future<int> getBucketCount();
|
||||
std::string getBucketString(int num) { return format("%s_%d", m_bucketPrefix.c_str(), num); }
|
||||
Future<std::string> getBucketForFile(std::string const &name);
|
||||
Future<std::vector<std::string>> getBucketList();
|
||||
|
||||
Reference<BlobStoreEndpoint> m_bstore;
|
||||
std::string m_bucketPrefix;
|
||||
Future<int> m_bucketCount;
|
||||
static Reference<IBackupContainer> openContainer(std::string url);
|
||||
static std::vector<std::string> getURLFormats();
|
||||
static Future<std::vector<std::string>> listContainers(std::string const &baseURL);
|
||||
|
||||
std::string getURL() const {
|
||||
return URL;
|
||||
}
|
||||
|
||||
static std::string lastOpenError;
|
||||
|
||||
private:
|
||||
std::string URL;
|
||||
};
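Taken together, IBackupFile and IBackupContainer above form the write path for a snapshot: open a container, write range files, then publish them in a keyspace snapshot manifest. A sketch of that flow, not from the commit (the URL parameter and block size are placeholders):

ACTOR Future<Void> writeOneRangeFile(std::string url, Version version, Standalone<StringRef> block) {
    state Reference<IBackupContainer> container = IBackupContainer::openContainer(url);
    Void _ = wait(container->create());

    state Reference<IBackupFile> file = wait(container->writeRangeFile(version, 1 << 20));
    Void _ = wait(file->append(block.begin(), block.size()));
    Void _ = wait(file->finish());

    // Publish the finished file as a (single-file) keyspace snapshot.
    Void _ = wait(container->writeKeyspaceSnapshotFile({ file->getFileName() }, file->size()));
    return Void();
}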
@@ -256,6 +256,7 @@ namespace dbBackup {
state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>( new ReadYourWritesTransaction(cx) );
|
||||
loop{
|
||||
try {
|
||||
tr->reset();
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
state Key prefix = task->params[BackupAgentBase::keyConfigLogUid].withPrefix(applyMutationsKeyVersionMapRange.begin);
|
||||
|
@@ -265,15 +266,11 @@ namespace dbBackup {
state Future<Optional<Value>> rangeCountValue = tr->get(rangeCountKey, true);
|
||||
state Future<Standalone<RangeResultRef>> prevRange = tr->getRange(firstGreaterOrEqual(prefix), lastLessOrEqual(rangeBegin.withPrefix(prefix)), 1, true, true);
|
||||
state Future<Standalone<RangeResultRef>> nextRange = tr->getRange(firstGreaterOrEqual(rangeEnd.withPrefix(prefix)), firstGreaterOrEqual(strinc(prefix)), 1, true, false);
|
||||
state Future<bool> verified = taskBucket->keepRunning(tr, task);
|
||||
state Future<Void> verified = taskBucket->keepRunning(tr, task);
|
||||
|
||||
Void _ = wait( checkDatabaseLock(tr, BinaryReader::fromStringRef<UID>(task->params[BackupAgentBase::keyConfigLogUid], Unversioned())) );
|
||||
Void _ = wait( success(backupVersions) && success(logVersionValue) && success(rangeCountValue) && success(prevRange) && success(nextRange) && success(verified) );
|
||||
|
||||
if(!verified.get()) {
|
||||
return Void();
|
||||
}
|
||||
|
||||
int64_t rangeCount = 0;
|
||||
if(rangeCountValue.get().present()) {
|
||||
ASSERT(rangeCountValue.get().get().size() == sizeof(int64_t));
|
||||
|
@@ -654,30 +651,11 @@ namespace dbBackup {
|
||||
for (int i = 0; i < ranges.size(); ++i) {
|
||||
results.push_back(PromiseStream<RCGroup>());
|
||||
rc.push_back(readCommitted(taskBucket->src, results[i], Future<Void>(Void()), lock, ranges[i], decodeBKMutationLogKey, false, true, true, nullptr));
|
||||
rc.push_back(readCommitted(taskBucket->src, results[i], Future<Void>(Void()), lock, ranges[i], decodeBKMutationLogKey, true, true, true, nullptr));
|
||||
dump.push_back(dumpData(cx, task, results[i], lock.getPtr(), taskBucket));
|
||||
}
|
||||
|
||||
state Future<Void> dumpComplete = waitForAll(dump);
|
||||
|
||||
try {
|
||||
loop {
|
||||
choose {
|
||||
when( Void _ = wait(dumpComplete) ) { break; }
|
||||
when( Void _ = wait(delay((CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS/2)/CLIENT_KNOBS->CORE_VERSIONSPERSECOND)) ) {
|
||||
bool saveResult = wait( taskBucket->saveAndExtend(cx, task) );
|
||||
if(!saveResult) {
|
||||
return Void();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Error &e) {
|
||||
if (e.code() == error_code_backup_error)
|
||||
return Void();
|
||||
throw;
|
||||
}
|
||||
Void _ = wait(waitForAll(dump));
|
||||
|
||||
if (newEndVersion < endVersion) {
|
||||
task->params[CopyLogRangeTaskFunc::keyNextBeginVersion] = BinaryWriter::toValue(newEndVersion, Unversioned());
|
||||
|
@@ -1650,7 +1628,7 @@ public:
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
|
||||
state Future<Optional<Value>> fDisabled = tr->get(backupAgent->taskBucket->getDisableKey());
|
||||
state Future<Optional<Value>> fPaused = tr->get(backupAgent->taskBucket->getPauseKey());
|
||||
int backupStateInt = wait(backupAgent->getStateValue(tr, logUid));
|
||||
state BackupAgentBase::enumState backupState = (BackupAgentBase::enumState)backupStateInt;
|
||||
|
||||
|
@@ -1736,9 +1714,9 @@ public:
}
|
||||
}
|
||||
|
||||
Optional<Value> disabled = wait(fDisabled);
|
||||
if(disabled.present()) {
|
||||
statusText += format("\nAll DR agents have been disabled.\n");
|
||||
Optional<Value> paused = wait(fPaused);
|
||||
if(paused.present()) {
|
||||
statusText += format("\nAll DR agents have been paused.\n");
|
||||
}
|
||||
|
||||
break;
|
||||
|
|
File diff suppressed because it is too large
@@ -144,6 +144,16 @@ public:
});
|
||||
}
|
||||
|
||||
Future<T> getD(Database cx, bool snapshot = false, T defaultValue = T()) const {
|
||||
auto &copy = *this;
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
|
||||
return copy.getD(tr, snapshot, defaultValue);
|
||||
});
|
||||
}
|
||||
|
||||
Future<T> getOrThrow(Database cx, bool snapshot = false, Error err = key_not_found()) const {
|
||||
auto &copy = *this;
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
|
||||
|
@@ -219,10 +229,10 @@ public:
typedef std::vector<PairType> PairsType;
|
||||
|
||||
// If end is not present one key past the end of the map is used.
|
||||
Future<PairsType> getRange(Reference<ReadYourWritesTransaction> tr, KeyType const &begin, Optional<KeyType> const &end, int limit, bool snapshot = false) const {
|
||||
Future<PairsType> getRange(Reference<ReadYourWritesTransaction> tr, KeyType const &begin, Optional<KeyType> const &end, int limit, bool snapshot = false, bool reverse = false) const {
|
||||
Subspace s = space; // 'this' could be invalid inside lambda
|
||||
Key endKey = end.present() ? s.pack(Codec<KeyType>::pack(end.get())) : space.range().end;
|
||||
return map(tr->getRange(KeyRangeRef(s.pack(Codec<KeyType>::pack(begin)), endKey), GetRangeLimits(limit), snapshot),
|
||||
return map(tr->getRange(KeyRangeRef(s.pack(Codec<KeyType>::pack(begin)), endKey), GetRangeLimits(limit), snapshot, reverse),
|
||||
[s] (Standalone<RangeResultRef> const &kvs) -> PairsType {
|
||||
PairsType results;
|
||||
for(int i = 0; i < kvs.size(); ++i) {
|
||||
|
@@ -269,3 +279,53 @@ public:
|
||||
Subspace space;
|
||||
};
|
||||
|
||||
template <typename _ValueType>
|
||||
class KeyBackedSet {
|
||||
public:
|
||||
KeyBackedSet(KeyRef key) : space(key) {}
|
||||
|
||||
typedef _ValueType ValueType;
|
||||
typedef std::vector<ValueType> Values;
|
||||
|
||||
// If end is not present one key past the end of the map is used.
|
||||
Future<Values> getRange(Reference<ReadYourWritesTransaction> tr, ValueType const &begin, Optional<ValueType> const &end, int limit, bool snapshot = false) const {
|
||||
Subspace s = space; // 'this' could be invalid inside lambda
|
||||
Key endKey = end.present() ? s.pack(Codec<ValueType>::pack(end.get())) : space.range().end;
|
||||
return map(tr->getRange(KeyRangeRef(s.pack(Codec<ValueType>::pack(begin)), endKey), GetRangeLimits(limit), snapshot),
|
||||
[s] (Standalone<RangeResultRef> const &kvs) -> Values {
|
||||
Values results;
|
||||
for(int i = 0; i < kvs.size(); ++i) {
|
||||
results.push_back(Codec<ValueType>::unpack(s.unpack(kvs[i].key)));
|
||||
}
|
||||
return results;
|
||||
});
|
||||
}
|
||||
|
||||
Future<bool> exists(Reference<ReadYourWritesTransaction> tr, ValueType const &val, bool snapshot = false) const {
|
||||
return map(tr->get(space.pack(Codec<ValueType>::pack(val)), snapshot), [](Optional<Value> const &val) -> bool {
|
||||
return val.present();
|
||||
});
|
||||
}
|
||||
|
||||
// Returns the expectedSize of the set key
|
||||
int insert(Reference<ReadYourWritesTransaction> tr, ValueType const &val) {
|
||||
Key k = space.pack(Codec<ValueType>::pack(val));
|
||||
tr->set(k, StringRef());
|
||||
return k.expectedSize();
|
||||
}
|
||||
|
||||
void erase(Reference<ReadYourWritesTransaction> tr, ValueType const &val) {
|
||||
return tr->clear(space.pack(Codec<ValueType>::pack(val)));
|
||||
}
|
||||
|
||||
void erase(Reference<ReadYourWritesTransaction> tr, ValueType const &begin, ValueType const &end) {
|
||||
return tr->clear(KeyRangeRef(space.pack(Codec<ValueType>::pack(begin)), space.pack(Codec<ValueType>::pack(end))));
|
||||
}
|
||||
|
||||
void clear(Reference<ReadYourWritesTransaction> tr) {
|
||||
return tr->clear(space.range());
|
||||
}
|
||||
|
||||
Subspace space;
|
||||
};
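KeyBackedSet above stores each member as an empty value under its subspace, so membership is a point read and insertion is a single set. A small sketch, not part of the commit, with a made-up subspace prefix and int64_t values (this assumes a Codec<int64_t> specialization is available, as the other KeyBacked types already rely on):

KeyBackedSet<int64_t> completedChunks(LiteralStringRef("\xff\x02/exampleCompletedChunks/"));

void markChunkDone(Reference<ReadYourWritesTransaction> tr, int64_t chunk) {
    completedChunks.insert(tr, chunk);         // writes space.pack(Codec<int64_t>::pack(chunk)) -> ''
}

Future<bool> isChunkDone(Reference<ReadYourWritesTransaction> tr, int64_t chunk) {
    return completedChunks.exists(tr, chunk);  // true if the packed key is present
}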
@@ -96,6 +96,8 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( BACKUP_LOCK_BYTES, 1e8 );
|
||||
init( BACKUP_RANGE_TIMEOUT, TASKBUCKET_TIMEOUT_VERSIONS/CORE_VERSIONSPERSECOND/2.0 );
|
||||
init( BACKUP_RANGE_MINWAIT, std::max(1.0, BACKUP_RANGE_TIMEOUT/2.0));
|
||||
init( BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC, 60 * 60 ); // 1 hour
|
||||
init( BACKUP_DEFAULT_SNAPSHOT_INTERVAL_SEC, 3600 * 24 * 10); // 10 days
|
||||
init( BACKUP_SHARD_TASK_LIMIT, 1000 ); if( randomize && BUGGIFY ) BACKUP_SHARD_TASK_LIMIT = 4;
|
||||
init( BACKUP_AGGREGATE_POLL_RATE_UPDATE_INTERVAL, 60);
|
||||
init( BACKUP_AGGREGATE_POLL_RATE, 2.0 ); // polls per second target for all agents on the cluster
|
||||
|
@@ -138,6 +140,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( HTTP_VERBOSE_LEVEL, 0 );
|
||||
init( BLOBSTORE_CONNECT_TRIES, 10 );
|
||||
init( BLOBSTORE_CONNECT_TIMEOUT, 10 );
|
||||
init( BLOBSTORE_MAX_CONNECTION_LIFE, 120 );
|
||||
init( BLOBSTORE_REQUEST_TRIES, 10 );
|
||||
init( BLOBSTORE_REQUEST_TIMEOUT, 30 );
|
||||
|
||||
|
@@ -150,7 +153,6 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( BLOBSTORE_READ_CACHE_BLOCKS_PER_FILE, 2 );
|
||||
init( BLOBSTORE_MULTIPART_MAX_PART_SIZE, 20000000 );
|
||||
init( BLOBSTORE_MULTIPART_MIN_PART_SIZE, 5242880 );
|
||||
init( BLOBSTORE_BACKUP_BUCKETS, 100 );
|
||||
|
||||
// These are basically unlimited by default but can be used to reduce blob IO if needed
|
||||
init( BLOBSTORE_REQUESTS_PER_SECOND, 200 );
|
||||
|
|
|
@@ -98,6 +98,8 @@ public:
int BACKUP_LOCK_BYTES;
|
||||
double BACKUP_RANGE_TIMEOUT;
|
||||
double BACKUP_RANGE_MINWAIT;
|
||||
int BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC;
|
||||
int BACKUP_DEFAULT_SNAPSHOT_INTERVAL_SEC;
|
||||
int BACKUP_SHARD_TASK_LIMIT;
|
||||
double BACKUP_AGGREGATE_POLL_RATE;
|
||||
double BACKUP_AGGREGATE_POLL_RATE_UPDATE_INTERVAL;
|
||||
|
@@ -142,6 +144,7 @@ public:
int HTTP_VERBOSE_LEVEL;
|
||||
int BLOBSTORE_CONNECT_TRIES;
|
||||
int BLOBSTORE_CONNECT_TIMEOUT;
|
||||
int BLOBSTORE_MAX_CONNECTION_LIFE;
|
||||
int BLOBSTORE_REQUEST_TRIES;
|
||||
int BLOBSTORE_REQUEST_TIMEOUT;
|
||||
int BLOBSTORE_REQUESTS_PER_SECOND;
|
||||
|
@@ -156,7 +159,6 @@ public:
int BLOBSTORE_READ_CACHE_BLOCKS_PER_FILE;
|
||||
int BLOBSTORE_MAX_SEND_BYTES_PER_SECOND;
|
||||
int BLOBSTORE_MAX_RECV_BYTES_PER_SECOND;
|
||||
int BLOBSTORE_BACKUP_BUCKETS;
|
||||
|
||||
int CONSISTENCY_CHECK_RATE_LIMIT;
|
||||
int CONSISTENCY_CHECK_RATE_WINDOW;
|
||||
|
|
|
@@ -21,8 +21,7 @@
#ifndef FDBCLIENT_STATUS_H
|
||||
#define FDBCLIENT_STATUS_H
|
||||
|
||||
#include "json_spirit/json_spirit_writer_template.h"
|
||||
#include "json_spirit/json_spirit_reader_template.h"
|
||||
#include "../fdbrpc/JSONDoc.h"
|
||||
|
||||
struct StatusObject : json_spirit::mObject {
|
||||
typedef json_spirit::mObject Map;
|
||||
|
@@ -71,291 +70,6 @@ static StatusObject makeMessage(const char *name, const char *description) {
return out;
|
||||
}
|
||||
|
||||
// JSONDoc is a convenient reader/writer class for manipulating JSON documents using "paths".
|
||||
// Access is done using a "path", which is a string of dot-separated
|
||||
// substrings representing successively deeper keys found in nested
// JSON objects within the top level object
|
||||
//
|
||||
// Most methods are read-only with respect to the source JSON object.
|
||||
// The only modifying methods are create(), put(), subDoc(), and mergeInto()
|
||||
//
|
||||
// JSONDoc maintains some state which is the JSON value that was found during the most recent
|
||||
// *successful* path lookup.
|
||||
//
|
||||
// Examples:
|
||||
// StatusObjectReader r(some_obj);
|
||||
//
|
||||
// // See if JSON doc path a.b.c exists
|
||||
// bool exists = r.has("a.b.c");
|
||||
//
|
||||
// // See if JSON doc path a.b.c exists, if it does then assign value to x. Throws if path exists but T is not compatible.
|
||||
// T x;
|
||||
// bool exists = r.has("a.b.c", x);
|
||||
//
|
||||
// // This way you can chain things like this:
|
||||
// bool is_two = r.has("a.b.c", x) && x == 2;
|
||||
//
|
||||
// // Alternatively, you can avoid the temp var by making use of the last() method which returns a reference
|
||||
// // to the JSON value at the last successfully found path that has() has seen.
|
||||
// bool is_int = r.has("a.b.c") && r.last().type == json_spirit::int_type;
|
||||
// bool is_two = r.has("a.b.c") && r.last().get_int() == 2;
|
||||
//
|
||||
// // The familiar at() method also exists but now supports the same path concept.
|
||||
// // It will throw in the same circumstances as the original method
|
||||
// int x = r.at("a.b.c").get_int();
|
||||
//
|
||||
// // If you wish to access an element with the dot character within its name (e.g., "hostname.example.com"),
|
||||
// // you can do so by setting the "split" flag to false in either the "has" or "get" methods. The example
|
||||
// // below will look for the key "hostname.example.com" as a subkey of the path "a.b.c" (or, more
|
||||
// // precisely, it will look to see if r.has("a").has("b").has("c").has("hostname.example.com", false)).
|
||||
// bool exists = r.has("a.b.c").has("hostname.example.com", false);
|
||||
//
|
||||
// // And the familiar operator[] interface exists as well, however only as a synonym for at()
|
||||
// // because this class is only for reading. Using operator [] will not auto-create null things.
|
||||
// // The following would throw if a.b.c did not exist, or if it was not an int.
|
||||
// int x = r["a.b.c"].get_int();
|
||||
struct JSONDoc {
|
||||
JSONDoc() : pObj(NULL) {}
|
||||
|
||||
// Construction from const json_spirit::mObject, trivial and will never throw.
|
||||
// Resulting JSONDoc will not allow modifications.
|
||||
JSONDoc(const json_spirit::mObject &o) : pObj(&o), wpObj(NULL) {}
|
||||
|
||||
// Construction from json_spirit::mObject. Allows modifications.
|
||||
JSONDoc(json_spirit::mObject &o) : pObj(&o), wpObj(&o) {}
|
||||
|
||||
// Construction from const json_spirit::mValue (which is a Variant type) which will try to
|
||||
// convert it to an mObject. This will throw if that fails, just as it would
|
||||
// if the caller called get_obj() itself and used the previous constructor instead.
|
||||
JSONDoc(const json_spirit::mValue &v) : pObj(&v.get_obj()), wpObj(NULL) {}
|
||||
|
||||
// Construction from non-const json_spirit::mValue - will convert the mValue to
|
||||
// an object if it isn't already and then attach to it.
|
||||
JSONDoc(json_spirit::mValue &v) {
|
||||
if(v.type() != json_spirit::obj_type)
|
||||
v = json_spirit::mObject();
|
||||
wpObj = &v.get_obj();
|
||||
pObj = wpObj;
|
||||
}
|
||||
|
||||
// Returns whether or not a "path" exists.
|
||||
// Returns true if all elements along path exist
|
||||
// Returns false if any elements along the path are MISSING
|
||||
// Will throw if a non-terminating path element exists BUT is not a JSON Object.
|
||||
// If the "split" flag is set to "false", then this skips the splitting of a
|
||||
// path into on the "dot" character.
|
||||
// When a path is found, pLast is updated.
|
||||
bool has(std::string path, bool split=true) {
|
||||
if (pObj == NULL)
|
||||
return false;
|
||||
|
||||
if (path.empty())
|
||||
return false;
|
||||
size_t start = 0;
|
||||
const json_spirit::mValue *curVal = NULL;
|
||||
while (start < path.size())
|
||||
{
|
||||
// If a path segment is found then curVal must be an object
|
||||
size_t dot;
|
||||
if (split) {
|
||||
dot = path.find_first_of('.', start);
|
||||
if (dot == std::string::npos)
|
||||
dot = path.size();
|
||||
} else {
|
||||
dot = path.size();
|
||||
}
|
||||
std::string key = path.substr(start, dot - start);
|
||||
|
||||
// Get pointer to the current Object that the key has to be in
|
||||
// This will throw if the value is not an Object
|
||||
const json_spirit::mObject *curObj = curVal ? &curVal->get_obj() : pObj;
|
||||
|
||||
// Make sure key exists, if not then return false
|
||||
if (!curObj->count(key))
|
||||
return false;
|
||||
|
||||
// Advance curVal
|
||||
curVal = &curObj->at(key);
|
||||
|
||||
// Advance start position in path
|
||||
start = dot + 1;
|
||||
}
|
||||
|
||||
pLast = curVal;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Creates the given path (forcing Objects to exist along its depth, replacing whatever else might have been there)
|
||||
// and returns a reference to the Value at that location.
|
||||
json_spirit::mValue & create(std::string path, bool split=true) {
|
||||
if (wpObj == NULL || path.empty())
|
||||
throw std::runtime_error("JSON Object not writable or bad JSON path");
|
||||
|
||||
size_t start = 0;
|
||||
json_spirit::mValue *curVal = nullptr;
|
||||
while (start < path.size())
|
||||
{
|
||||
// Get next path segment name
|
||||
size_t dot;
|
||||
if (split) {
|
||||
dot = path.find_first_of('.', start);
|
||||
if (dot == std::string::npos)
|
||||
dot = path.size();
|
||||
} else {
|
||||
dot = path.size();
|
||||
}
|
||||
std::string key = path.substr(start, dot - start);
|
||||
if(key.empty())
|
||||
throw std::runtime_error("invalid JSON path");
|
||||
|
||||
// Get/create pointer to the current Object that the key has to be in
|
||||
// If curVal is defined then force it to be an Object
|
||||
json_spirit::mObject *curObj;
|
||||
if(curVal != nullptr) {
|
||||
if(curVal->type() != json_spirit::obj_type)
|
||||
*curVal = json_spirit::mObject();
|
||||
curObj = &curVal->get_obj();
|
||||
}
|
||||
else // Otherwise start with the object *this is writing to
|
||||
curObj = wpObj;
|
||||
|
||||
// Make sure key exists, if not then return false
|
||||
if (!curObj->count(key))
|
||||
(*curObj)[key] = json_spirit::mValue();
|
||||
|
||||
// Advance curVal
|
||||
curVal = &((*curObj)[key]);
|
||||
|
||||
// Advance start position in path
|
||||
start = dot + 1;
|
||||
}
|
||||
|
||||
return *curVal;
|
||||
}
|
||||
|
||||
// Creates the path given, puts a value at it, and returns a reference to the value
|
||||
template<typename T>
|
||||
T & put(std::string path, const T & value, bool split=true) {
|
||||
json_spirit::mValue &v = create(path, split);
|
||||
v = value;
|
||||
return v.get_value<T>();
|
||||
}
|
||||
|
||||
// Ensures that an Object exists at path and returns a JSONDoc that writes to it.
JSONDoc subDoc(std::string path, bool split=true) {
|
||||
json_spirit::mValue &v = create(path, split);
|
||||
if(v.type() != json_spirit::obj_type)
|
||||
v = json_spirit::mObject();
|
||||
return JSONDoc(v.get_obj());
|
||||
}
|
||||
|
||||
// Apply a merge operation to two values. Works for int, double, and string
|
||||
template <typename T>
|
||||
static json_spirit::mObject mergeOperator(const std::string &op, const json_spirit::mObject &op_a, const json_spirit::mObject &op_b, T const &a, T const &b) {
|
||||
if(op == "$max")
|
||||
return {{op, std::max<T>(a, b)}};
|
||||
if(op == "$min")
|
||||
return {{op, std::min<T>(a, b)}};
|
||||
if(op == "$sum")
|
||||
return {{op, a + b}};
|
||||
throw std::exception();
|
||||
}
|
||||
|
||||
// This is just a convenience function to make calling mergeOperator look cleaner
|
||||
template <typename T>
|
||||
static json_spirit::mObject mergeOperatorWrapper(const std::string &op, const json_spirit::mObject &op_a, const json_spirit::mObject &op_b, const json_spirit::mValue &a, const json_spirit::mValue &b) {
|
||||
return mergeOperator<T>(op, op_a, op_b, a.get_value<T>(), b.get_value<T>());
|
||||
}
|
||||
|
||||
static inline std::string getOperator(const json_spirit::mObject &obj) {
|
||||
for(auto &k : obj)
|
||||
if(!k.first.empty() && k.first[0] == '$')
|
||||
return k.first;
|
||||
return std::string();
|
||||
}
|
||||
|
||||
// Merge src into dest, applying merge operators
|
||||
static void mergeInto(json_spirit::mObject &dst, const json_spirit::mObject &src);
|
||||
static void mergeValueInto(json_spirit::mValue &d, const json_spirit::mValue &s);
|
||||
|
||||
// Remove any merge operators that never met any mates.
|
||||
static void cleanOps(json_spirit::mObject &obj);
|
||||
void cleanOps() {
|
||||
if(wpObj == nullptr)
|
||||
throw std::runtime_error("JSON Object not writable");
|
||||
|
||||
return cleanOps(*wpObj);
|
||||
}
|
||||
|
||||
void absorb(const JSONDoc &doc) {
|
||||
if(wpObj == nullptr)
|
||||
throw std::runtime_error("JSON Object not writable");
|
||||
|
||||
if(doc.pObj == nullptr)
|
||||
throw std::runtime_error("JSON Object not readable");
|
||||
|
||||
mergeInto(*wpObj, *doc.pObj);
|
||||
}
|
||||
|
||||
// Returns whether or not a "path" exists.
|
||||
// Returns true if all elements along path exist
|
||||
// Returns false if any elements along the path are MISSING
|
||||
// Sets out to the value of the thing that path refers to
|
||||
// Will throw if a non-terminating path element exists BUT is not a JSON Object.
|
||||
// Will throw if all elements along path exists but T is an incompatible type
|
||||
template <typename T> bool get(const std::string path, T &out, bool split=true) {
|
||||
bool r = has(path, split);
|
||||
if (r)
|
||||
out = pLast->get_value<T>();
|
||||
return r;
|
||||
}
|
||||
|
||||
// For convenience, wraps get() in a try/catch and returns false UNLESS the path existed and was a compatible type.
|
||||
template <typename T> bool tryGet(const std::string path, T &out, bool split=true) {
|
||||
try { return get(path, out, split); } catch(...) {}
|
||||
return false;
|
||||
}
|
||||
|
||||
const json_spirit::mValue & at(const std::string path, bool split=true) {
|
||||
if (has(path, split))
|
||||
return last();
|
||||
throw std::runtime_error("JSON path doesn't exist");
|
||||
}
|
||||
|
||||
const json_spirit::mValue & operator[](const std::string path) {
|
||||
return at(path);
|
||||
}
|
||||
|
||||
const json_spirit::mValue & last() const { return *pLast; }
|
||||
bool valid() const { return pObj != NULL; }
|
||||
|
||||
const json_spirit::mObject & obj() {
|
||||
// This dummy object is necessary to make working with obj() easier when this does not currently
|
||||
// point to a valid mObject. valid() can be called to explicitly check for this scenario, but
|
||||
// calling obj() at least will not seg fault and instead return a const reference to an empty mObject.
|
||||
// This is very useful when iterating using obj() to access the underlying mObject.
|
||||
static const json_spirit::mObject dummy;
|
||||
return pObj ? *pObj : dummy;
|
||||
}
|
||||
|
||||
// Return reference to writeable underlying mObject but only if *this was initialized with a writeable value or object
|
||||
json_spirit::mObject & wobj() {
|
||||
ASSERT(wpObj != nullptr);
|
||||
return *wpObj;
|
||||
}
|
||||
|
||||
// This is the version used to represent 'now' for use by the $expires operator.
|
||||
// By default, nothing will expire and it is up to the user of JSONDoc to update this value if
|
||||
// it is intended to be used.
|
||||
// This is slightly hackish but otherwise the JSON merge functions would require a Transaction.
|
||||
static uint64_t expires_reference_version;
|
||||
private:
|
||||
const json_spirit::mObject *pObj;
|
||||
// Writeable pointer to the same object. Will be NULL if initialized from a const object.
|
||||
json_spirit::mObject *wpObj;
|
||||
const json_spirit::mValue *pLast;
|
||||
};
|
||||
|
||||
// Typedef to cover older code that was written when this class was only a reader and called StatusObjectReader
|
||||
typedef JSONDoc StatusObjectReader;
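The comment block above documents the read side of JSONDoc; the write side is create(), put(), subDoc(), and mergeInto(). A brief sketch of building a nested document with those methods, not from the commit (the paths are illustrative):

std::string exampleLayerStatus() {
    json_spirit::mObject root;
    JSONDoc doc(root);                                        // writable attachment to root

    doc.put("cluster.layers.backup.paused", false);           // creates cluster, layers, backup along the way
    JSONDoc tags = doc.subDoc("cluster.layers.backup.tags");  // forces an Object to exist at the path
    tags.put("default.running_backup", true);

    return json_spirit::write_string(json_spirit::mValue(root));
}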
@@ -21,6 +21,10 @@
#include "TaskBucket.h"
|
||||
#include "ReadYourWrites.h"
|
||||
|
||||
Reference<TaskFuture> Task::getDoneFuture(Reference<FutureBucket> fb) {
|
||||
return fb->unpack(params[reservedTaskParamKeyDone]);
|
||||
}
|
||||
|
||||
struct UnblockFutureTaskFunc : TaskFuncBase {
|
||||
static StringRef name;
|
||||
|
||||
|
@@ -87,7 +91,7 @@ Key Task::reservedTaskParamValidValue = LiteralStringRef("_validvalue");
// IMPORTANT: Task() must result in an EMPTY parameter set, so params should only
|
||||
// be set for non-default constructor arguments. To change this behavior look at all
|
||||
// Task() default constructions to see if they require params to be empty and call clear.
|
||||
Task::Task(Value type, uint32_t version, Value done, unsigned int priority) {
|
||||
Task::Task(Value type, uint32_t version, Value done, unsigned int priority) : extendMutex(1) {
|
||||
if (type.size())
|
||||
params[Task::reservedTaskParamKeyType] = type;
|
||||
|
||||
|
@@ -204,8 +208,9 @@ public:
|
||||
state Standalone<RangeResultRef> values = wait(tr->getRange(taskAvailableSpace.range(), CLIENT_KNOBS->TOO_MANY));
|
||||
Version version = wait(tr->getReadVersion());
|
||||
task->timeout = (uint64_t)version + (uint64_t)(taskBucket->timeout * (CLIENT_KNOBS->TASKBUCKET_TIMEOUT_JITTER_OFFSET + CLIENT_KNOBS->TASKBUCKET_TIMEOUT_JITTER_RANGE * g_random->random01()));
|
||||
Subspace timeoutSpace = taskBucket->timeouts.get(task->timeout).get(taskUID);
|
||||
task->timeoutVersion = version + (uint64_t)(taskBucket->timeout * (CLIENT_KNOBS->TASKBUCKET_TIMEOUT_JITTER_OFFSET + CLIENT_KNOBS->TASKBUCKET_TIMEOUT_JITTER_RANGE * g_random->random01()));
|
||||
|
||||
Subspace timeoutSpace = taskBucket->timeouts.get(task->timeoutVersion).get(taskUID);
|
||||
|
||||
for (auto & s : values) {
|
||||
Key param = taskAvailableSpace.unpack(s.key).getString(0);
|
||||
|
@@ -220,6 +225,7 @@ public:
return task;
|
||||
}
|
||||
|
||||
// Verify that the user configured task verification key still has the user specified value
ACTOR static Future<bool> taskVerify(Reference<TaskBucket> tb, Reference<ReadYourWritesTransaction> tr, Reference<Task> task) {
|
||||
|
||||
if (task->params.find(Task::reservedTaskParamValidKey) == task->params.end()) {
|
||||
|
@@ -303,17 +309,54 @@ public:
return result;
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> extendTimeoutRepeatedly(Database cx, Reference<TaskBucket> taskBucket, Reference<Task> task) {
|
||||
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
|
||||
state Version versionNow = wait(runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
|
||||
taskBucket->setOptions(tr);
|
||||
return map(tr->getReadVersion(), [=](Version v) {
|
||||
return v;
|
||||
});
|
||||
}));
|
||||
|
||||
loop {
|
||||
state FlowLock::Releaser releaser;
|
||||
|
||||
// Wait until we are half way to the timeout version of this task
|
||||
Void _ = wait(delay(0.8 * (BUGGIFY ? (2 * g_random->random01()) : 1.0) * (double)(task->timeoutVersion - (uint64_t)versionNow) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
|
||||
|
||||
// Take the extendMutex lock until we either succeed or stop trying to extend due to failure
|
||||
Void _ = wait(task->extendMutex.take());
|
||||
releaser = FlowLock::Releaser(task->extendMutex, 1);
|
||||
|
||||
loop {
|
||||
try {
|
||||
tr->reset();
|
||||
taskBucket->setOptions(tr);
|
||||
|
||||
// Attempt to extend the task's timeout
|
||||
state Version newTimeout = wait(taskBucket->extendTimeout(tr, task, false));
|
||||
Void _ = wait(tr->commit());
|
||||
task->timeoutVersion = newTimeout;
|
||||
versionNow = tr->getCommittedVersion();
|
||||
break;
|
||||
} catch(Error &e) {
|
||||
Void _ = wait(tr->onError(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR static Future<bool> doTask(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
|
||||
if (!task || !TaskFuncBase::isValidTask(task))
|
||||
return false;
|
||||
|
||||
state Reference<TaskFuncBase> taskFunc;
|
||||
|
||||
try {
|
||||
state Reference<TaskFuncBase> taskFunc = TaskFuncBase::create(task->params[Task::reservedTaskParamKeyType]);
|
||||
taskFunc = TaskFuncBase::create(task->params[Task::reservedTaskParamKeyType]);
|
||||
if (taskFunc) {
|
||||
state bool verifyTask = (task->params.find(Task::reservedTaskParamValidKey) != task->params.end());
|
||||
|
||||
state Version versionNow;
|
||||
|
||||
if (verifyTask) {
|
||||
loop {
|
||||
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
|
||||
|
@@ -330,8 +373,6 @@ public:
Void _ = wait(tr->commit());
|
||||
return true;
|
||||
}
|
||||
Version ver = wait(tr->getReadVersion());
|
||||
versionNow = ver;
|
||||
break;
|
||||
}
|
||||
catch (Error &e) {
|
||||
|
@@ -339,35 +380,8 @@ public:
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
state Reference<ReadYourWritesTransaction> tr2(new ReadYourWritesTransaction(cx));
|
||||
taskBucket->setOptions(tr2);
|
||||
Version ver = wait(tr2->getReadVersion());
|
||||
versionNow = ver;
|
||||
}
|
||||
|
||||
state Future<Void> run = taskFunc->execute(cx, taskBucket, futureBucket, task);
|
||||
// Convert timeout version of task to seconds using recent read version and versions_per_second
|
||||
state Future<Void> timeout = delay((BUGGIFY ? (2 * g_random->random01()) : 1.0) * (double)(task->timeout - (uint64_t)versionNow) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND);
|
||||
|
||||
loop {
|
||||
choose {
|
||||
when(Void _ = wait(run)) {
|
||||
break;
|
||||
}
|
||||
|
||||
when(Void _ = wait(timeout)) {
|
||||
// Get read version, if it is greater than task timeout then return true because a task was run but it timed out.
|
||||
state Reference<ReadYourWritesTransaction> tr3(new ReadYourWritesTransaction(cx));
|
||||
taskBucket->setOptions(tr3);
|
||||
Version version = wait(tr3->getReadVersion());
|
||||
if(version >= task->timeout)
|
||||
throw timed_out();
|
||||
// Otherwise reset the timeout
|
||||
timeout = delay((BUGGIFY ? (2 * g_random->random01()) : 1.0) * (double)(task->timeout - (uint64_t)versionNow) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND);
|
||||
}
|
||||
}
|
||||
}
|
||||
Void _ = wait(taskFunc->execute(cx, taskBucket, futureBucket, task) || extendTimeoutRepeatedly(cx, taskBucket, task));
|
||||
|
||||
if (BUGGIFY) Void _ = wait(delay(10.0));
|
||||
Void _ = wait(runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
|
||||
|
@@ -380,6 +394,16 @@ public:
.detail("TaskType", task->params[Task::reservedTaskParamKeyType].printable())
|
||||
.detail("Priority", task->getPriority())
|
||||
.error(e);
|
||||
try {
|
||||
state Error e2 = e;
|
||||
Void _ = wait(taskFunc->handleError(cx, task, e2));
|
||||
} catch(Error &e) {
|
||||
TraceEvent(SevWarn, "TB_ExecuteFailureLogErrorFailed")
|
||||
.detail("TaskUID", task->key.printable())
|
||||
.detail("TaskType", task->params[Task::reservedTaskParamKeyType].printable())
|
||||
.detail("Priority", task->getPriority())
|
||||
.error(e2);
|
||||
}
|
||||
}
|
||||
|
||||
// Return true to indicate that we did work.
|
||||
|
@@ -449,16 +473,16 @@ public:
}
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> watchDisabled(Database cx, Reference<TaskBucket> taskBucket, Reference<AsyncVar<bool>> disabled) {
|
||||
ACTOR static Future<Void> watchPaused(Database cx, Reference<TaskBucket> taskBucket, Reference<AsyncVar<bool>> paused) {
|
||||
loop {
|
||||
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
|
||||
try {
|
||||
taskBucket->setOptions(tr);
|
||||
Optional<Value> disabledVal = wait(tr->get(taskBucket->disableKey));
|
||||
disabled->set(disabledVal.present());
|
||||
state Future<Void> watchDisabledFuture = tr->watch(taskBucket->disableKey);
|
||||
Optional<Value> pausedVal = wait(tr->get(taskBucket->pauseKey));
|
||||
paused->set(pausedVal.present());
|
||||
state Future<Void> watchPausedFuture = tr->watch(taskBucket->pauseKey);
|
||||
Void _ = wait(tr->commit());
|
||||
Void _ = wait(watchDisabledFuture);
|
||||
Void _ = wait(watchPausedFuture);
|
||||
}
|
||||
catch (Error &e) {
|
||||
Void _ = wait(tr->onError(e));
|
||||
|
@@ -467,15 +491,15 @@ public:
}
|
||||
|
||||
ACTOR static Future<Void> run(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, double *pollDelay, int maxConcurrentTasks) {
|
||||
state Reference<AsyncVar<bool>> disabled = Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) );
|
||||
state Future<Void> watchDisabledFuture = watchDisabled(cx, taskBucket, disabled);
|
||||
state Reference<AsyncVar<bool>> paused = Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) );
|
||||
state Future<Void> watchPausedFuture = watchPaused(cx, taskBucket, paused);
|
||||
|
||||
loop {
|
||||
while(disabled->get()) {
|
||||
Void _ = wait(disabled->onChange() || watchDisabledFuture);
|
||||
while(paused->get()) {
|
||||
Void _ = wait(paused->onChange() || watchPausedFuture);
|
||||
}
|
||||
|
||||
Void _ = wait(dispatch(cx, taskBucket, futureBucket, pollDelay, maxConcurrentTasks) || disabled->onChange() || watchDisabledFuture);
|
||||
Void _ = wait(dispatch(cx, taskBucket, futureBucket, pollDelay, maxConcurrentTasks) || paused->onChange() || watchPausedFuture);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -533,11 +557,12 @@ public:
return false;
|
||||
}
|
||||
|
||||
// Verify that the task's keys are still in the timeout space at the expected timeout prefix
|
||||
ACTOR static Future<bool> isFinished(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> task) {
|
||||
taskBucket->setOptions(tr);
|
||||
|
||||
Tuple t;
|
||||
t.append(task->timeout);
|
||||
t.append(task->timeoutVersion);
|
||||
t.append(task->key);
|
||||
|
||||
Standalone<RangeResultRef> values = wait(tr->getRange(taskBucket->timeouts.range(t), 1));
|
||||
|
@@ -658,7 +683,7 @@ public:
task.params[param] = iter.value;
|
||||
}
|
||||
|
||||
// Move the final task to its new available keyspace. Safe if task == Task()
|
||||
// Move the final task, if complete, to its new available keyspace. Safe if task == Task()
|
||||
if(!values.more) {
|
||||
Subspace space = taskBucket->getAvailableSpace(task.getPriority()).get(task.key);
|
||||
for(auto &p : task.params)
|
||||
|
@@ -693,27 +718,48 @@ public:
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<bool> saveAndExtend(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> task) {
|
||||
ACTOR static Future<Version> extendTimeout(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> task, bool updateParams, Version newTimeoutVersion) {
|
||||
taskBucket->setOptions(tr);
|
||||
|
||||
// First make sure it's safe to keep running
|
||||
bool keepRunning = wait(taskBucket->keepRunning(tr, task));
|
||||
if(!keepRunning)
|
||||
return false;
|
||||
Void _ = wait(taskBucket->keepRunning(tr, task));
|
||||
|
||||
// Clear old timeout keys
|
||||
KeyRange range = taskBucket->timeouts.range(Tuple().append(task->timeout).append(task->key));
|
||||
tr->clear(range);
|
||||
|
||||
// Update timeout and write new timeout keys
|
||||
// This is where the task definition currently exists
|
||||
state Subspace oldTimeoutSpace = taskBucket->timeouts.get(task->timeoutVersion).get(task->key);
|
||||
// Update the task's timeout
|
||||
Version version = wait(tr->getReadVersion());
|
||||
task->timeout = (uint64_t)version + taskBucket->timeout;
|
||||
Subspace timeoutSpace = taskBucket->timeouts.get(task->timeout).get(task->key);
|
||||
|
||||
for(auto &p : task->params)
|
||||
tr->set(timeoutSpace.pack(p.key), p.value);
|
||||
if(newTimeoutVersion == invalidVersion)
|
||||
newTimeoutVersion = version + taskBucket->timeout;
|
||||
else if(newTimeoutVersion <= version) // Ensure that the time extension is to the future
|
||||
newTimeoutVersion = version + 1;
|
||||
|
||||
return true;
|
||||
// This is where the task definition is being moved to
|
||||
state Subspace newTimeoutSpace = taskBucket->timeouts.get(newTimeoutVersion).get(task->key);
|
||||
|
||||
tr->addReadConflictRange(oldTimeoutSpace.range());
|
||||
tr->addWriteConflictRange(newTimeoutSpace.range());
|
||||
|
||||
// If we're updating the task params the clear the old space and write params to the new space
|
||||
if(updateParams) {
|
||||
TEST(true); // Extended a task while updating parameters
|
||||
for(auto &p : task->params) {
|
||||
tr->set(newTimeoutSpace.pack(p.key), p.value);
|
||||
}
|
||||
} else {
|
||||
TEST(true); // Extended a task without updating parameters
|
||||
// Otherwise, read and transplant the params from the old to new timeout spaces
|
||||
Standalone<RangeResultRef> params = wait(tr->getRange(oldTimeoutSpace.range(), CLIENT_KNOBS->TOO_MANY));
|
||||
for(auto &kv : params) {
|
||||
Tuple paramKey = oldTimeoutSpace.unpack(kv.key);
|
||||
tr->set(newTimeoutSpace.pack(paramKey), kv.value);
|
||||
}
|
||||
}
|
||||
|
||||
tr->clear(oldTimeoutSpace.range());
|
||||
|
||||
return newTimeoutVersion;
|
||||
}
|
||||
};
|
||||
|
||||
|
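// --- Illustrative sketch (not part of this change): a long-running execute() body keeping its task lease alive. ---
// doSomeWork() and isDone() are hypothetical placeholders; extendTimeout() throws (via keepRunning)
// if the task has already timed out or been finished elsewhere, so the caller stops on task_interrupted.
ACTOR static Future<Void> exampleLongRunningWork(Database cx, Reference<TaskBucket> taskBucket, Reference<Task> task) {
	loop {
		Void _ = wait(doSomeWork(task));                            // hypothetical unit of work
		Void _ = wait(taskBucket->extendTimeout(cx, task, false));  // also updates task->timeoutVersion
		if(isDone(task))                                            // hypothetical completion check
			return Void();
	}
}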
@ -723,7 +769,7 @@ TaskBucket::TaskBucket(const Subspace& subspace, bool sysAccess, bool priorityBa
|
|||
, available(prefix.get(LiteralStringRef("av")))
|
||||
, available_prioritized(prefix.get(LiteralStringRef("avp")))
|
||||
, timeouts(prefix.get(LiteralStringRef("to")))
|
||||
, disableKey(prefix.pack(LiteralStringRef("disable")))
|
||||
, pauseKey(prefix.pack(LiteralStringRef("pause")))
|
||||
, timeout(CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS)
|
||||
, system_access(sysAccess)
|
||||
, priority_batch(priorityBatch)
|
||||
|
@ -742,13 +788,13 @@ Future<Void> TaskBucket::clear(Reference<ReadYourWritesTransaction> tr){
|
|||
return Void();
|
||||
}
|
||||
|
||||
Future<Void> TaskBucket::changeDisable(Reference<ReadYourWritesTransaction> tr, bool disable){
|
||||
Future<Void> TaskBucket::changePause(Reference<ReadYourWritesTransaction> tr, bool pause){
|
||||
setOptions(tr);
|
||||
|
||||
if(disable) {
|
||||
tr->set(disableKey, StringRef());
|
||||
if(pause) {
|
||||
tr->set(pauseKey, StringRef());
|
||||
} else {
|
||||
tr->clear(disableKey);
|
||||
tr->clear(pauseKey);
|
||||
}
|
||||
|
||||
return Void();
|
||||
|
@ -759,7 +805,17 @@ Key TaskBucket::addTask(Reference<ReadYourWritesTransaction> tr, Reference<Task>

Key key(g_random->randomUniqueID().toString());

Subspace taskSpace = getAvailableSpace(task->getPriority()).get(key);
Subspace taskSpace;

// If scheduledVersion is valid then place the task directly into the timeout
// space for its scheduled time, otherwise place it in the available space by priority.
Version scheduledVersion = ReservedTaskParams.scheduledVersion().getOrDefault(task, invalidVersion);
if(scheduledVersion != invalidVersion) {
taskSpace = timeouts.get(scheduledVersion).get(key);
}
else {
taskSpace = getAvailableSpace(task->getPriority()).get(key);
}

for (auto & param : task->params)
tr->set(taskSpace.pack(param.key), param.value);

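// --- Illustrative sketch (not part of this change): adding a task that should not run until a future version. ---
// The task type name and the delay constant below are assumptions for illustration only.
ACTOR Future<Key> exampleAddDelayedTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket) {
	state Reference<Task> task(new Task());
	task->params[Task::reservedTaskParamKeyType] = LiteralStringRef("ExampleTaskType");
	Version v = wait(tr->getReadVersion());
	const int64_t delayVersions = 60 * 1000000;  // assumed ~60 seconds at 1e6 versions per second
	ReservedTaskParams.scheduledVersion().set(task, v + delayVersions);
	return taskBucket->addTask(tr, task);
}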
@ -818,8 +874,8 @@ Future<Void> TaskBucket::run(Database cx, Reference<FutureBucket> futureBucket,
|
|||
return TaskBucketImpl::run(cx, Reference<TaskBucket>::addRef(this), futureBucket, pollDelay, maxConcurrentTasks);
|
||||
}
|
||||
|
||||
Future<Void> TaskBucket::watchDisabled(Database cx, Reference<AsyncVar<bool>> disabled) {
|
||||
return TaskBucketImpl::watchDisabled(cx, Reference<TaskBucket>::addRef(this), disabled);
|
||||
Future<Void> TaskBucket::watchPaused(Database cx, Reference<AsyncVar<bool>> paused) {
|
||||
return TaskBucketImpl::watchPaused(cx, Reference<TaskBucket>::addRef(this), paused);
|
||||
}
|
||||
|
||||
Future<bool> TaskBucket::isEmpty(Reference<ReadYourWritesTransaction> tr){
|
||||
|
@ -830,7 +886,7 @@ Future<Void> TaskBucket::finish(Reference<ReadYourWritesTransaction> tr, Referen
|
|||
setOptions(tr);
|
||||
|
||||
Tuple t;
|
||||
t.append(task->timeout);
|
||||
t.append(task->timeoutVersion);
|
||||
t.append(task->key);
|
||||
|
||||
tr->atomicOp(prefix.pack(LiteralStringRef("task_count")), LiteralStringRef("\xff\xff\xff\xff\xff\xff\xff\xff"), MutationRef::AddValue);
|
||||
|
@ -839,8 +895,8 @@ Future<Void> TaskBucket::finish(Reference<ReadYourWritesTransaction> tr, Referen
|
|||
return Void();
|
||||
}
|
||||
|
||||
Future<bool> TaskBucket::saveAndExtend(Reference<ReadYourWritesTransaction> tr, Reference<Task> task) {
|
||||
return TaskBucketImpl::saveAndExtend(tr, Reference<TaskBucket>::addRef(this), task);
|
||||
Future<Version> TaskBucket::extendTimeout(Reference<ReadYourWritesTransaction> tr, Reference<Task> task, bool updateParams, Version newTimeoutVersion) {
|
||||
return TaskBucketImpl::extendTimeout(tr, Reference<TaskBucket>::addRef(this), task, updateParams, newTimeoutVersion);
|
||||
}
|
||||
|
||||
Future<bool> TaskBucket::isFinished(Reference<ReadYourWritesTransaction> tr, Reference<Task> task){
|
||||
|
@ -1006,13 +1062,28 @@ public:
|
|||
Standalone<RangeResultRef> values = wait(tr->getRange(taskFuture->callbacks.range(), CLIENT_KNOBS->TOO_MANY));
|
||||
tr->clear(taskFuture->callbacks.range());
|
||||
|
||||
state Reference<Task> task(new Task());
|
||||
for (auto & s : values) {
|
||||
Key key = taskFuture->callbacks.unpack(s.key).getString(1);
|
||||
task->params[key] = s.value;
|
||||
std::vector<Future<Void>> actions;
|
||||
|
||||
if(values.size() != 0) {
|
||||
state Reference<Task> task(new Task());
|
||||
Key lastTaskID;
|
||||
for (auto & s : values) {
|
||||
Tuple t = taskFuture->callbacks.unpack(s.key);
|
||||
Key taskID = t.getString(0);
|
||||
Key key = t.getString(1);
|
||||
// If we see a new task ID and the old one isn't empty then process the task accumulated so far and make a new task
|
||||
if(taskID.size() != 0 && taskID != lastTaskID) {
|
||||
actions.push_back(performAction(tr, taskBucket, taskFuture, task));
|
||||
task = Reference<Task>(new Task());
|
||||
}
|
||||
task->params[key] = s.value;
|
||||
lastTaskID = taskID;
|
||||
}
|
||||
// Process the last task
|
||||
actions.push_back(performAction(tr, taskBucket, taskFuture, task));
|
||||
}
|
||||
|
||||
Void _ = wait(performAction(tr, taskBucket, taskFuture, task));
|
||||
Void _ = wait(waitForAll(actions));
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
|
|
@ -24,11 +24,16 @@
|
|||
|
||||
#include "flow/flow.h"
|
||||
#include "flow/IDispatched.h"
|
||||
#include "flow/genericactors.actor.h"
|
||||
|
||||
#include "FDBTypes.h"
|
||||
#include "NativeAPI.h"
|
||||
#include "RunTransaction.actor.h"
|
||||
#include "Subspace.h"
|
||||
#include "KeyBackedTypes.h"
|
||||
|
||||
class FutureBucket;
|
||||
class TaskFuture;
|
||||
|
||||
class Task : public ReferenceCounted<Task> {
|
||||
public:
|
||||
|
@ -40,10 +45,14 @@ public:
|
|||
unsigned int getPriority() const;
|
||||
|
||||
Key key;
|
||||
uint64_t timeout;
|
||||
Version timeoutVersion;
|
||||
|
||||
// Take this lock while you don't want Taskbucket to try to extend your task's timeout
|
||||
FlowLock extendMutex;
|
||||
|
||||
Map<Key, Value> params; // SOMEDAY: use one arena?
|
||||
|
||||
// New reserved task parameter keys should be added in ReservedTaskParams below instead of here.
|
||||
static Key reservedTaskParamKeyPriority;
|
||||
static Key reservedTaskParamKeyType;
|
||||
static Key reservedTaskParamKeyAddTask;
|
||||
|
@ -53,8 +62,45 @@ public:
|
|||
static Key reservedTaskParamKeyVersion;
|
||||
static Key reservedTaskParamValidKey;
|
||||
static Key reservedTaskParamValidValue;
|
||||
|
||||
Reference<TaskFuture> getDoneFuture(Reference<FutureBucket> fb);
|
||||
|
||||
std::string toString() const {
|
||||
std::string s = format("TASK [key=%s timeoutVersion=%lld ", key.printable().c_str(), timeoutVersion);
|
||||
for(auto ¶m : params)
|
||||
s.append(format("%s=%s ", param.key.printable().c_str(), param.value.printable().c_str()));
|
||||
s.append("]");
|
||||
return s;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
class TaskParam {
public:
TaskParam(StringRef key) : key(key) {}
T get(Reference<Task> task) const {
return Codec<T>::unpack(Tuple::unpack(task->params[key]));
}
void set(Reference<Task> task, T const &val) const {
task->params[key] = Codec<T>::pack(val).pack();
}
bool exists(Reference<Task> task) const {
return task->params.find(key) != task->params.end();
}
T getOrDefault(Reference<Task> task, const T defaultValue = T()) const {
if(!exists(task))
return defaultValue;
return get(task);
}
StringRef key;
};

struct {
TaskParam<Version> scheduledVersion() {
return LiteralStringRef(__FUNCTION__);
}
} ReservedTaskParams;

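// --- Illustrative sketch (not part of this change): defining and using an additional typed task parameter. ---
// "beginVersion" is a hypothetical parameter name used only for this example.
struct {
	TaskParam<Version> beginVersion() {
		return LiteralStringRef(__FUNCTION__);
	}
} ExampleTaskParams;

inline void exampleParamUse(Reference<Task> task) {
	ExampleTaskParams.beginVersion().set(task, 100);
	Version begin = ExampleTaskParams.beginVersion().getOrDefault(task, invalidVersion);
	(void)begin;
}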
||||
class FutureBucket;
|
||||
|
||||
class TaskBucket : public ReferenceCounted<TaskBucket> {
|
||||
|
@ -69,9 +115,9 @@ public:
|
|||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
}
|
||||
|
||||
Future<Void> changeDisable(Reference<ReadYourWritesTransaction> tr, bool disable);
|
||||
Future<Void> changeDisable(Database cx, bool disable) {
|
||||
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr){ return changeDisable(tr, disable); });
|
||||
Future<Void> changePause(Reference<ReadYourWritesTransaction> tr, bool pause);
|
||||
Future<Void> changePause(Database cx, bool pause) {
|
||||
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr){ return changePause(tr, pause); });
|
||||
}
|
||||
|
||||
Future<Void> clear(Reference<ReadYourWritesTransaction> tr);
|
||||
|
@ -80,10 +126,18 @@ public:
}

// Transactions inside an execute() function should call this and stop without committing if it throws.
Future<bool> keepRunning(Reference<ReadYourWritesTransaction> tr, Reference<Task> task) {
Future<Void> keepRunning(Reference<ReadYourWritesTransaction> tr, Reference<Task> task) {
Future<bool> finished = isFinished(tr, task);
Future<bool> valid = isVerified(tr, task);
return map(success(finished) && success(valid), [=](Void) -> bool { return !finished.get() && valid.get(); } );
return map(success(finished) && success(valid), [=](Void) {
if(finished.get() || !valid.get()) {
throw task_interrupted();
}
return Void();
});
}
Future<Void> keepRunning(Database cx, Reference<Task> task) {
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr){ return keepRunning(tr, task); });
}

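// --- Illustrative sketch (not part of this change): guarding a transaction inside a TaskFunc's execute() with keepRunning(). ---
// This would live in a .actor.cpp implementation, not in this header; someKey/someValue are hypothetical.
// The point is that keepRunning() now throws task_interrupted instead of returning false.
ACTOR static Future<Void> exampleCheckedWrite(Database cx, Reference<TaskBucket> taskBucket, Reference<Task> task, Key someKey, Value someValue) {
	state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
	loop {
		try {
			taskBucket->setOptions(tr);
			Void _ = wait(taskBucket->keepRunning(tr, task));  // throws if the task is finished or no longer valid
			tr->set(someKey, someValue);
			Void _ = wait(tr->commit());
			return Void();
		} catch(Error &e) {
			Void _ = wait(tr->onError(e));
		}
	}
}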
static void setValidationCondition(Reference<Task> task, KeyRef vKey, KeyRef vValue);
|
||||
|
@ -104,7 +158,7 @@ public:
|
|||
Future<bool> doOne(Database cx, Reference<FutureBucket> futureBucket);
|
||||
|
||||
Future<Void> run(Database cx, Reference<FutureBucket> futureBucket, double *pollDelay, int maxConcurrentTasks);
|
||||
Future<Void> watchDisabled(Database cx, Reference<AsyncVar<bool>> disabled);
|
||||
Future<Void> watchPaused(Database cx, Reference<AsyncVar<bool>> paused);
|
||||
|
||||
Future<bool> isEmpty(Reference<ReadYourWritesTransaction> tr);
|
||||
Future<bool> isEmpty(Database cx){
|
||||
|
@ -117,9 +171,16 @@ public:
}

// Extend the task's timeout as if it just started and also save any parameter changes made to the task
Future<bool> saveAndExtend(Reference<ReadYourWritesTransaction> tr, Reference<Task> task);
Future<bool> saveAndExtend(Database cx, Reference<Task> task){
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr){ return saveAndExtend(tr, task); });
Future<Version> extendTimeout(Reference<ReadYourWritesTransaction> tr, Reference<Task> task, bool updateParams, Version newTimeoutVersion = invalidVersion);
Future<Void> extendTimeout(Database cx, Reference<Task> task, bool updateParams, Version newTimeoutVersion = invalidVersion){
return map(
runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
return extendTimeout(tr, task, updateParams, newTimeoutVersion);
}),
[=](Version v) {
task->timeoutVersion = v;
return Void();
});
}

Future<bool> isFinished(Reference<ReadYourWritesTransaction> tr, Reference<Task> task);
|
||||
|
@ -154,8 +215,8 @@ public:
|
|||
return lock_aware;
|
||||
}
|
||||
|
||||
Key getDisableKey() const {
|
||||
return disableKey;
|
||||
Key getPauseKey() const {
|
||||
return pauseKey;
|
||||
}
|
||||
|
||||
Subspace getAvailableSpace(int priority = 0) {
|
||||
|
@ -175,7 +236,7 @@ private:
|
|||
|
||||
Subspace prefix;
|
||||
Subspace active;
|
||||
Key disableKey;
|
||||
Key pauseKey;
|
||||
|
||||
// Available task subspaces. Priority 0, the default, will be under available which is backward
|
||||
// compatible with pre-priority TaskBucket processes. Priority 1 and higher will be in
|
||||
|
@ -318,6 +379,10 @@ struct TaskFuncBase : IDispatched<TaskFuncBase, Standalone<StringRef>, std::func
|
|||
// *Database* operations here are exactly once; side effects are at least once; excessive time here may prevent task from finishing!
|
||||
virtual Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) = 0;
|
||||
|
||||
virtual Future<Void> handleError(Database cx, Reference<Task> task, Error const &error) {
|
||||
return Void();
|
||||
}
|
||||
|
||||
template <class TaskFuncBaseType>
|
||||
struct Factory {
|
||||
static TaskFuncBase* create() {
|
||||
|
@ -348,7 +413,7 @@ struct TaskCompletionKey {
|
|||
static TaskCompletionKey noSignal() {
|
||||
return TaskCompletionKey(StringRef());
|
||||
}
|
||||
|
||||
TaskCompletionKey() {}
|
||||
private:
|
||||
TaskCompletionKey(Reference<TaskFuture> f) : joinFuture(f) { }
|
||||
TaskCompletionKey(Key k) : key(k) { }
|
||||
|
|
|
@ -223,7 +223,7 @@ private:
|
|||
return Void();
|
||||
|
||||
// Wait for an upload slot to be available
|
||||
Void _ = wait(f->m_concurrentUploads.take(1));
|
||||
Void _ = wait(f->m_concurrentUploads.take());
|
||||
|
||||
// Do the upload, and if it fails forward errors to m_error and also stop if anything else sends an error to m_error
|
||||
// Also, hold a releaser for the concurrent upload slot while all that is going on.
|
||||
|
|
|
@ -45,7 +45,7 @@ public:
|
|||
|
||||
// Read from the underlying file to a CacheBlock
|
||||
ACTOR static Future<Reference<CacheBlock>> readBlock(AsyncFileReadAheadCache *f, int length, int64_t offset) {
|
||||
Void _ = wait(f->m_max_concurrent_reads.take(1));
|
||||
Void _ = wait(f->m_max_concurrent_reads.take());
|
||||
|
||||
state Reference<CacheBlock> block(new CacheBlock(length));
|
||||
try {
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include "libb64/encode.h"
|
||||
#include "sha1/SHA1.h"
|
||||
#include "time.h"
|
||||
#include "fdbclient/json_spirit/json_spirit_reader_template.h"
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
|
||||
|
@ -51,6 +50,7 @@ BlobStoreEndpoint::Stats BlobStoreEndpoint::s_stats;
|
|||
BlobStoreEndpoint::BlobKnobs::BlobKnobs() {
|
||||
connect_tries = CLIENT_KNOBS->BLOBSTORE_CONNECT_TRIES;
|
||||
connect_timeout = CLIENT_KNOBS->BLOBSTORE_CONNECT_TIMEOUT;
|
||||
max_connection_life = CLIENT_KNOBS->BLOBSTORE_MAX_CONNECTION_LIFE;
|
||||
request_tries = CLIENT_KNOBS->BLOBSTORE_REQUEST_TRIES;
|
||||
request_timeout = CLIENT_KNOBS->BLOBSTORE_REQUEST_TIMEOUT;
|
||||
requests_per_second = CLIENT_KNOBS->BLOBSTORE_REQUESTS_PER_SECOND;
|
||||
|
@ -65,14 +65,13 @@ BlobStoreEndpoint::BlobKnobs::BlobKnobs() {
|
|||
read_cache_blocks_per_file = CLIENT_KNOBS->BLOBSTORE_READ_CACHE_BLOCKS_PER_FILE;
|
||||
max_send_bytes_per_second = CLIENT_KNOBS->BLOBSTORE_MAX_RECV_BYTES_PER_SECOND;
|
||||
max_recv_bytes_per_second = CLIENT_KNOBS->BLOBSTORE_MAX_SEND_BYTES_PER_SECOND;
|
||||
buckets_to_span = CLIENT_KNOBS->BLOBSTORE_BACKUP_BUCKETS;
|
||||
}
|
||||
|
||||
bool BlobStoreEndpoint::BlobKnobs::set(StringRef name, int value) {
|
||||
#define TRY_PARAM(n, sn) if(name == LiteralStringRef(#n) || name == LiteralStringRef(#sn)) { n = value; return true; }
|
||||
TRY_PARAM(buckets_to_span, bts);
|
||||
TRY_PARAM(connect_tries, ct);
|
||||
TRY_PARAM(connect_timeout, cto);
|
||||
TRY_PARAM(max_connection_life, mcl);
|
||||
TRY_PARAM(request_tries, rt);
|
||||
TRY_PARAM(request_timeout, rto);
|
||||
TRY_PARAM(requests_per_second, rps);
|
||||
|
@ -96,9 +95,9 @@ std::string BlobStoreEndpoint::BlobKnobs::getURLParameters() const {
|
|||
static BlobKnobs defaults;
|
||||
std::string r;
|
||||
#define _CHECK_PARAM(n, sn) if(n != defaults. n) { r += format("%s%s=%d", r.empty() ? "" : "&", #sn, n); }
|
||||
_CHECK_PARAM(buckets_to_span, bts);
|
||||
_CHECK_PARAM(connect_tries, ct);
|
||||
_CHECK_PARAM(connect_timeout, cto);
|
||||
_CHECK_PARAM(max_connection_life, mcl);
|
||||
_CHECK_PARAM(request_tries, rt);
|
||||
_CHECK_PARAM(request_timeout, rto);
|
||||
_CHECK_PARAM(requests_per_second, rps);
|
||||
|
@ -147,34 +146,16 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
|
|||
throw std::string("Invalid blobstore URL.");
|
||||
StringRef key = t.tok(":");
|
||||
StringRef secret = t.tok("@");
|
||||
StringRef hosts = t.tok(":");
|
||||
StringRef port = t.tok("/");
|
||||
StringRef hostPort = t.tok("/");
|
||||
StringRef resource = t.tok("?");
|
||||
|
||||
// The host/ip list can have up to one text hostname, which should be first, and then 0 or more IP addresses.
|
||||
// The items are comma separated.
|
||||
std::string host;
|
||||
std::vector<NetworkAddress> addrs;
|
||||
char *end;
|
||||
uint16_t portNum = (uint16_t)strtoul(port.toString().c_str(), &end, 10);
|
||||
if(*end) {
|
||||
throw format("%s is not a valid port", port.toString().c_str());
|
||||
}
|
||||
// hostPort is at least a host or IP address, optionally followed by :portNumber or :serviceName
|
||||
tokenizer h(hostPort);
|
||||
StringRef host = h.tok(":");
|
||||
if(host.size() == 0)
|
||||
throw std::string("host cannot be empty");
|
||||
|
||||
tokenizer h(hosts);
|
||||
while(1) {
|
||||
StringRef item = h.tok(",");
|
||||
if(item.size() == 0)
|
||||
break;
|
||||
// Try to parse item as an IP with the given port, if it fails it will throw so then store it as host
|
||||
try {
|
||||
// Use an integer port number so the parse doesn't fail due to the port number being garbage
|
||||
NetworkAddress na = NetworkAddress::parse(format("%s:%d", item.toString().c_str(), (int)portNum));
|
||||
addrs.push_back(na);
|
||||
} catch(Error &e) {
|
||||
host = item.toString();
|
||||
}
|
||||
}
|
||||
StringRef service = h.s;
|
||||
|
||||
BlobKnobs knobs;
|
||||
while(1) {
|
||||
|
@ -193,59 +174,29 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
|
|||
if(resourceFromURL != nullptr)
|
||||
*resourceFromURL = resource.toString();
|
||||
|
||||
return Reference<BlobStoreEndpoint>(new BlobStoreEndpoint(host, addrs, portNum, key.toString(), secret.toString(), knobs));
|
||||
return Reference<BlobStoreEndpoint>(new BlobStoreEndpoint(host.toString(), service.toString(), key.toString(), secret.toString(), knobs));
|
||||
|
||||
} catch(std::string &err) {
|
||||
if(error != nullptr)
|
||||
*error = err;
|
||||
TraceEvent(SevWarnAlways, "BlobStoreEndpointBadURL").detail("Description", err).detail("Format", getURLFormat()).detail("URL", url);
|
||||
throw file_not_found();
|
||||
throw backup_invalid_url();
|
||||
}
|
||||
}
|
||||
|
||||
std::string BlobStoreEndpoint::getResourceURL(std::string resource) {
|
||||
std::string hosts = host;
|
||||
for(auto &na : addresses) {
|
||||
if(hosts.size() != 0)
|
||||
hosts.append(",");
|
||||
hosts.append(toIPString(na.ip));
|
||||
std::string hostPort = host;
|
||||
if(!service.empty()) {
|
||||
hostPort.append(":");
|
||||
hostPort.append(service);
|
||||
}
|
||||
std::string r = format("blobstore://%s:%s@%s:%d/%s", key.c_str(), secret.c_str(), hosts.c_str(), (int)port, resource.c_str());
|
||||
std::string r = format("blobstore://%s:%s@%s/%s", key.c_str(), secret.c_str(), hostPort.c_str(), resource.c_str());
|
||||
std::string p = knobs.getURLParameters();
|
||||
if(!p.empty())
|
||||
r.append("?").append(p);
|
||||
return r;
|
||||
}
|
||||
|
||||
ACTOR Future<Void> resolveHostname_impl(Reference<BlobStoreEndpoint> bstore) {
|
||||
state std::vector<uint32_t> ip_addresses;
|
||||
|
||||
// TODO: Resolve host to get list of IPs into ip_addresses. Ideally this should be done using
|
||||
// boost asio so that backup agents can re-resolve a blobstore endpoint if none of the IP addresses
|
||||
// are reachable for some long amount of time that exceeds time-to-live for the hostname.
|
||||
// However, if it is done using blocking calls instead then the command line tools that use
|
||||
// blobstore URLs will have to resolve them at a safe time during input parsing / checking.
|
||||
//
|
||||
|
||||
// Don't modify the existing IP address list if resolution did not work
|
||||
if(ip_addresses.empty())
|
||||
return Void();
|
||||
|
||||
// Resolve was successful so replace addresses with the new IPs found.
|
||||
bstore->addresses.clear();
|
||||
for(auto &ip : ip_addresses)
|
||||
bstore->addresses.push_back(NetworkAddress(ip, bstore->port));
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
Future<Void> BlobStoreEndpoint::resolveHostname(bool only_if_unresolved) {
|
||||
if(only_if_unresolved && !addresses.empty())
|
||||
return Void();
|
||||
|
||||
return resolveHostname_impl(Reference<BlobStoreEndpoint>::addRef(this));
|
||||
}
|
||||
|
||||
ACTOR Future<bool> objectExists_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
|
||||
std::string resource = std::string("/") + bucket + "/" + object;
|
||||
HTTP::Headers headers;
|
||||
|
@ -261,7 +212,7 @@ Future<bool> BlobStoreEndpoint::objectExists(std::string const &bucket, std::str
|
|||
ACTOR Future<Void> deleteObject_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
|
||||
std::string resource = std::string("/") + bucket + "/" + object;
|
||||
HTTP::Headers headers;
|
||||
Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, NULL, 0, {200, 404}));
|
||||
Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, NULL, 0, {200, 204, 404}));
|
||||
// 200 means object deleted, 404 means it doesn't exist already, so either success code passed above is fine.
|
||||
return Void();
|
||||
}
|
||||
|
@ -271,19 +222,23 @@ Future<Void> BlobStoreEndpoint::deleteObject(std::string const &bucket, std::str
|
|||
}
|
||||
|
||||
ACTOR Future<Void> deleteBucket_impl(Reference<BlobStoreEndpoint> b, std::string bucket, int *pNumDeleted) {
|
||||
state PromiseStream<BlobStoreEndpoint::ObjectInfo> resultStream;
|
||||
state Future<Void> done = b->getBucketContentsStream(bucket, resultStream);
|
||||
state PromiseStream<BlobStoreEndpoint::ListResult> resultStream;
|
||||
state Future<Void> done = b->listBucketStream(bucket, resultStream);
|
||||
state std::vector<Future<Void>> deleteFutures;
|
||||
loop {
|
||||
choose {
|
||||
when(Void _ = wait(done)) {
|
||||
break;
|
||||
}
|
||||
when(BlobStoreEndpoint::ObjectInfo info = waitNext(resultStream.getFuture())) {
|
||||
if(pNumDeleted == nullptr)
|
||||
deleteFutures.push_back(b->deleteObject(bucket, info.name));
|
||||
else
|
||||
deleteFutures.push_back(map(b->deleteObject(bucket, info.name), [this](Void) -> Void { ++*pNumDeleted; return Void(); }));
|
||||
when(BlobStoreEndpoint::ListResult list = waitNext(resultStream.getFuture())) {
|
||||
for(auto &object : list.objects) {
|
||||
int *pNumDeletedCopy = pNumDeleted; // avoid capture of this
|
||||
deleteFutures.push_back(map(b->deleteObject(bucket, object.name), [pNumDeletedCopy](Void) -> Void {
|
||||
if(pNumDeletedCopy != nullptr)
|
||||
++*pNumDeletedCopy;
|
||||
return Void();
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -296,6 +251,18 @@ Future<Void> BlobStoreEndpoint::deleteBucket(std::string const &bucket, int *pNu
|
|||
return deleteBucket_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket, pNumDeleted);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> createBucket_impl(Reference<BlobStoreEndpoint> b, std::string bucket) {
|
||||
std::string resource = std::string("/") + bucket;
|
||||
HTTP::Headers headers;
|
||||
|
||||
Reference<HTTP::Response> r = wait(b->doRequest("PUT", resource, headers, NULL, 0, {200, 409}));
|
||||
return Void();
|
||||
}
|
||||
|
||||
Future<Void> BlobStoreEndpoint::createBucket(std::string const &bucket) {
|
||||
return createBucket_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket);
|
||||
}
|
||||
|
||||
ACTOR Future<int64_t> objectSize_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
|
||||
std::string resource = std::string("/") + bucket + "/" + object;
|
||||
HTTP::Headers headers;
|
||||
|
@ -308,22 +275,36 @@ Future<int64_t> BlobStoreEndpoint::objectSize(std::string const &bucket, std::st
|
|||
return objectSize_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket, object);
|
||||
}
|
||||
|
||||
Future<Reference<IConnection>> BlobStoreEndpoint::connect( NetworkAddress address ) {
auto &pool = connectionPool[address];
Future<BlobStoreEndpoint::ReusableConnection> BlobStoreEndpoint::connect() {
// First try to get a connection from the pool
while(!connectionPool.empty()) {
ReusableConnection rconn = connectionPool.front();
connectionPool.pop();

while(!pool.empty()) {
BlobStoreEndpoint::ConnPoolEntry c = pool.front();
pool.pop_front();

// If the connection was placed in the pool less than 10 seconds ago, reuse it.
if(c.second > now() - 10) {
//printf("Reusing blob store connection\n");
return c.first;
// If the connection expires in the future then return it
if(rconn.expirationTime > now()) {
TraceEvent("BlobStoreEndpointReusingConnected")
.detail("RemoteEndpoint", rconn.conn->getPeerAddress())
.detail("ExpiresIn", rconn.expirationTime - now())
.suppressFor(5, true);
return rconn;
}
}

TraceEvent(SevInfo, "BlobStoreHTTPConnect").detail("RemoteEndpoint", address).suppressFor(5.0, true);
return INetworkConnections::net()->connect(address);
return map(INetworkConnections::net()->connect(host, service.empty() ? "http" : service), [=] (Reference<IConnection> conn) {
TraceEvent("BlobStoreEndpointNewConnection")
.detail("RemoteEndpoint", conn->getPeerAddress())
.detail("ExpiresIn", knobs.max_connection_life)
.suppressFor(5, true);
return ReusableConnection({conn, now() + knobs.max_connection_life});
});
}

void BlobStoreEndpoint::returnConnection(ReusableConnection &rconn) {
// If it expires in the future then add it back to the pool
if(rconn.expirationTime > now())
connectionPool.push(rconn);
rconn.conn = Reference<IConnection>();
}

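// --- Illustrative sketch (not part of this change): how a caller borrows and returns a pooled connection. ---
// The HTTP round trip itself is omitted here; doRequest_impl below is the real consumer of this pattern.
ACTOR Future<Void> examplePooledRequest(Reference<BlobStoreEndpoint> bstore) {
	state BlobStoreEndpoint::ReusableConnection rconn = wait(bstore->connect());
	// ... use rconn.conn for one HTTP request/response exchange ...
	bstore->returnConnection(rconn);  // re-pooled only if rconn.expirationTime is still in the future
	return Void();
}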
||||
// Do a request, get a Response.
|
||||
|
@ -336,25 +317,21 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
|
|||
if(contentLen > 0)
|
||||
headers["Content-Length"] = format("%d", contentLen);
|
||||
|
||||
Void _ = wait(bstore->concurrentRequests.take(1));
|
||||
headers["Host"] = bstore->host;
|
||||
Void _ = wait(bstore->concurrentRequests.take());
|
||||
state FlowLock::Releaser globalReleaser(bstore->concurrentRequests, 1);
|
||||
|
||||
state int maxTries = std::min(bstore->knobs.request_tries, bstore->knobs.connect_tries);
|
||||
state int thisTry = 1;
|
||||
state double nextRetryDelay = 2.0;
|
||||
state NetworkAddress address;
|
||||
|
||||
loop {
|
||||
state Optional<Error> err;
|
||||
try {
|
||||
// Pick an address
|
||||
address = bstore->addresses[g_random->randomInt(0, bstore->addresses.size())];
|
||||
state FlowLock::Releaser perAddressReleaser;
|
||||
Void _ = wait(bstore->concurrentRequestsPerAddress[address]->take(1));
|
||||
perAddressReleaser = FlowLock::Releaser(*bstore->concurrentRequestsPerAddress[address], 1);
|
||||
state Optional<NetworkAddress> remoteAddress;
|
||||
|
||||
try {
|
||||
// Start connecting
|
||||
Future<Reference<IConnection>> fconn = bstore->connect(address);
|
||||
Future<BlobStoreEndpoint::ReusableConnection> frconn = bstore->connect();
|
||||
|
||||
// Finish/update the request headers (which includes Date header)
|
||||
bstore->setAuthHeaders(verb, resource, headers);
|
||||
|
@ -374,14 +351,17 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
|
|||
}
|
||||
|
||||
// Finish connecting, do request
|
||||
state Reference<IConnection> conn = wait(timeoutError(fconn, bstore->knobs.connect_timeout));
|
||||
state BlobStoreEndpoint::ReusableConnection rconn = wait(timeoutError(frconn, bstore->knobs.connect_timeout));
|
||||
remoteAddress = rconn.conn->getPeerAddress();
|
||||
Void _ = wait(bstore->requestRate->getAllowance(1));
|
||||
state Reference<HTTP::Response> r = wait(timeoutError(HTTP::doRequest(conn, verb, resource, headers, &contentCopy, contentLen, bstore->sendRate, &bstore->s_stats.bytes_sent, bstore->recvRate), bstore->knobs.request_timeout));
|
||||
state Reference<HTTP::Response> r = wait(timeoutError(HTTP::doRequest(rconn.conn, verb, resource, headers, &contentCopy, contentLen, bstore->sendRate, &bstore->s_stats.bytes_sent, bstore->recvRate), bstore->knobs.request_timeout));
|
||||
r->convertToJSONifXML();
|
||||
|
||||
// Since the response was parsed successfully (which is why we are here) reuse the connection unless we received the "Connection: close" header.
|
||||
if(r->headers["Connection"] != "close")
|
||||
bstore->connectionPool[address].push_back(BlobStoreEndpoint::ConnPoolEntry(conn, now()));
|
||||
conn.clear();
|
||||
bstore->returnConnection(rconn);
|
||||
rconn.conn.clear();
|
||||
|
||||
} catch(Error &e) {
|
||||
// For timeouts, conn failure, or bad response reported by HTTP::doRequest, save the error and handle it / possibly retry below.
|
||||
// Any other error is rethrown.
|
||||
|
@ -409,8 +389,12 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
|
|||
|
||||
TraceEvent event(SevWarn, retryable ? "BlobStoreEndpointRequestFailedRetryable" : "BlobStoreEndpointRequestFailed");
|
||||
|
||||
event.detail("RemoteEndpoint", address)
|
||||
.detail("Verb", verb)
|
||||
if(remoteAddress.present())
|
||||
event.detail("RemoteEndpoint", remoteAddress.get());
|
||||
else
|
||||
event.detail("RemoteHost", bstore->host);
|
||||
|
||||
event.detail("Verb", verb)
|
||||
.detail("Resource", resource)
|
||||
.detail("ThisTry", thisTry)
|
||||
.suppressFor(5, true);
|
||||
|
@ -459,9 +443,16 @@ Future<Reference<HTTP::Response>> BlobStoreEndpoint::doRequest(std::string const
|
|||
return doRequest_impl(Reference<BlobStoreEndpoint>::addRef(this), verb, resource, headers, pContent, contentLen, successCodes);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> getBucketContentsStream_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, PromiseStream<BlobStoreEndpoint::ObjectInfo> results) {
ACTOR Future<Void> listBucketStream_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, PromiseStream<BlobStoreEndpoint::ListResult> results, Optional<std::string> prefix, Optional<char> delimiter) {
// Request 1000 keys at a time, the maximum allowed
state std::string resource = std::string("/") + bucket + "/?max-keys=1000&marker=";
state std::string resource = "/";
resource.append(bucket);
resource.append("/?max-keys=1000");
if(prefix.present())
resource.append("&prefix=").append(HTTP::urlEncode(prefix.get()));
if(delimiter.present())
resource.append("&delimiter=").append(HTTP::urlEncode(std::string(1, delimiter.get())));
resource.append("&marker=");
state std::string lastFile;
state bool more = true;

@ -470,32 +461,58 @@ ACTOR Future<Void> getBucketContentsStream_impl(Reference<BlobStoreEndpoint> bst
|
|||
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource + HTTP::urlEncode(lastFile), headers, NULL, 0, {200}));
|
||||
|
||||
try {
|
||||
BlobStoreEndpoint::ListResult result;
|
||||
// Parse the json assuming it is valid and contains the right stuff. If any exceptions are thrown, throw http_bad_response
|
||||
json_spirit::Value json;
|
||||
json_spirit::mValue json;
|
||||
json_spirit::read_string(r->content, json);
|
||||
for(auto &i : json.get_obj()) {
|
||||
if(i.name_ == "truncated") {
|
||||
more = i.value_.get_bool();
|
||||
}
|
||||
else if(i.name_ == "results") {
|
||||
BlobStoreEndpoint::ObjectInfo info;
|
||||
info.bucket = bucket;
|
||||
for(auto &o : i.value_.get_array()) {
|
||||
info.size = -1;
|
||||
info.name.clear();
|
||||
for(auto &f : o.get_obj()) {
|
||||
if(f.name_ == "size")
|
||||
info.size = f.value_.get_int();
|
||||
else if(f.name_ == "key")
|
||||
info.name = f.value_.get_str();
|
||||
}
|
||||
if(info.size >= 0 && !info.name.empty()) {
|
||||
lastFile = info.name;
|
||||
results.send(std::move(info));
|
||||
}
|
||||
}
|
||||
JSONDoc doc(json);
|
||||
std::string isTruncated;
|
||||
if (!doc.tryGet("truncated", more)) {
|
||||
doc.get("ListBucketResult.IsTruncated", isTruncated);
|
||||
more = isTruncated == "false" ? false : true;
|
||||
}
|
||||
if (doc.has("results")) {
|
||||
for (auto &jsonObject : doc.at("results").get_array()) {
|
||||
JSONDoc objectDoc(jsonObject);
|
||||
BlobStoreEndpoint::ObjectInfo object;
|
||||
objectDoc.get("size", object.size);
|
||||
objectDoc.get("key", object.name);
|
||||
result.objects.push_back(std::move(object));
|
||||
}
|
||||
}
|
||||
if(doc.has("ListBucketResult.Contents")) {
|
||||
if (doc.at("ListBucketResult.Contents").type() == json_spirit::array_type) {
|
||||
for (auto &jsonObject : doc.at("ListBucketResult.Contents").get_array()) {
|
||||
JSONDoc objectDoc(jsonObject);
|
||||
BlobStoreEndpoint::ObjectInfo object;
|
||||
std::string sizeVal;
|
||||
objectDoc.get("Size", sizeVal);
|
||||
object.size = strtoll(sizeVal.c_str(), NULL, 10);
|
||||
objectDoc.get("Key", object.name);
|
||||
result.objects.push_back(std::move(object));
|
||||
}
|
||||
}
|
||||
else {
|
||||
auto jsonObject = doc.at("ListBucketResult.Contents");
|
||||
JSONDoc objectDoc(jsonObject);
|
||||
BlobStoreEndpoint::ObjectInfo object;
|
||||
std::string sizeVal;
|
||||
objectDoc.get("Size", sizeVal);
|
||||
object.size = strtoll(sizeVal.c_str(), NULL, 10);
|
||||
objectDoc.get("Key", object.name);
|
||||
result.objects.push_back(std::move(object));
|
||||
|
||||
}
|
||||
}
|
||||
if(doc.has("CommonPrefixes")) {
|
||||
for(auto &jsonObject : doc.at("CommonPrefixes").get_array()) {
|
||||
JSONDoc objectDoc(jsonObject);
|
||||
std::string prefix;
|
||||
objectDoc.get("Prefix", prefix);
|
||||
result.commonPrefixes.push_back(std::move(prefix));
|
||||
}
|
||||
}
|
||||
results.send(result);
|
||||
} catch(Error &e) {
|
||||
throw http_bad_response();
|
||||
}
|
||||
|
@ -504,29 +521,30 @@ ACTOR Future<Void> getBucketContentsStream_impl(Reference<BlobStoreEndpoint> bst
|
|||
return Void();
|
||||
}
|
||||
|
||||
Future<Void> BlobStoreEndpoint::getBucketContentsStream(std::string const &bucket, PromiseStream<BlobStoreEndpoint::ObjectInfo> results) {
|
||||
return getBucketContentsStream_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket, results);
|
||||
Future<Void> BlobStoreEndpoint::listBucketStream(std::string const &bucket, PromiseStream<ListResult> results, Optional<std::string> prefix, Optional<char> delimiter) {
|
||||
return listBucketStream_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket, results, prefix, delimiter);
|
||||
}
|
||||
|
||||
ACTOR Future<BlobStoreEndpoint::BucketContentsT> getBucketContents_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket) {
|
||||
state BlobStoreEndpoint::BucketContentsT results;
|
||||
state PromiseStream<BlobStoreEndpoint::ObjectInfo> resultStream;
|
||||
state Future<Void> done = bstore->getBucketContentsStream(bucket, resultStream);
|
||||
ACTOR Future<BlobStoreEndpoint::ListResult> listBucket_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, Optional<std::string> prefix, Optional<char> delimiter) {
|
||||
state BlobStoreEndpoint::ListResult results;
|
||||
state PromiseStream<BlobStoreEndpoint::ListResult> resultStream;
|
||||
state Future<Void> done = bstore->listBucketStream(bucket, resultStream, prefix, delimiter);
|
||||
loop {
|
||||
choose {
|
||||
when(Void _ = wait(done)) {
|
||||
break;
|
||||
}
|
||||
when(BlobStoreEndpoint::ObjectInfo info = waitNext(resultStream.getFuture())) {
|
||||
results.push_back(info);
|
||||
when(BlobStoreEndpoint::ListResult info = waitNext(resultStream.getFuture())) {
|
||||
results.commonPrefixes.insert(results.commonPrefixes.end(), info.commonPrefixes.begin(), info.commonPrefixes.end());
|
||||
results.objects.insert(results.objects.end(), info.objects.begin(), info.objects.end());
|
||||
}
|
||||
}
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
Future<BlobStoreEndpoint::BucketContentsT> BlobStoreEndpoint::getBucketContents(std::string const &bucket) {
|
||||
return getBucketContents_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket);
|
||||
Future<BlobStoreEndpoint::ListResult> BlobStoreEndpoint::listBucket(std::string const &bucket, Optional<std::string> prefix, Optional<char> delimiter) {
|
||||
return listBucket_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket, prefix, delimiter);
|
||||
}
|
||||
|
||||
std::string BlobStoreEndpoint::hmac_sha1(std::string const &msg) {
|
||||
|
@ -589,7 +607,8 @@ void BlobStoreEndpoint::setAuthHeaders(std::string const &verb, std::string cons
|
|||
std::string sig = base64::encoder::from_string(hmac_sha1(msg));
|
||||
// base64 encoded blocks end in \n so remove it.
|
||||
sig.resize(sig.size() - 1);
|
||||
std::string auth = key;
|
||||
std::string auth = "AWS ";
|
||||
auth.append(key);
|
||||
auth.append(":");
|
||||
auth.append(sig);
|
||||
headers["Authorization"] = auth;
|
||||
|
@ -615,7 +634,7 @@ ACTOR Future<Void> writeEntireFileFromBuffer_impl(Reference<BlobStoreEndpoint> b
|
|||
if(contentLen > bstore->knobs.multipart_max_part_size)
|
||||
throw file_too_large();
|
||||
|
||||
Void _ = wait(bstore->concurrentUploads.take(1));
|
||||
Void _ = wait(bstore->concurrentUploads.take());
|
||||
state FlowLock::Releaser uploadReleaser(bstore->concurrentUploads, 1);
|
||||
|
||||
std::string resource = std::string("/") + bucket + "/" + object;
|
||||
|
@ -625,7 +644,7 @@ ACTOR Future<Void> writeEntireFileFromBuffer_impl(Reference<BlobStoreEndpoint> b
|
|||
state Reference<HTTP::Response> r = wait(bstore->doRequest("PUT", resource, headers, pContent, contentLen, {200}));
|
||||
|
||||
// For uploads, Blobstore returns an MD5 sum of uploaded content so check it.
|
||||
if(r->headers["Content-MD5"] != contentMD5)
|
||||
if (!r->verifyMD5(false, contentMD5))
|
||||
throw checksum_failed();
|
||||
|
||||
return Void();
|
||||
|
@ -669,7 +688,9 @@ ACTOR Future<int> readObject_impl(Reference<BlobStoreEndpoint> bstore, std::stri
|
|||
std::string resource = std::string("/") + bucket + "/" + object;
|
||||
HTTP::Headers headers;
|
||||
headers["Range"] = format("bytes=%lld-%lld", offset, offset + length - 1);
|
||||
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, NULL, 0, {200, 206}));
|
||||
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, NULL, 0, {200, 206, 404}));
|
||||
if(r->code == 404)
|
||||
throw file_not_found();
|
||||
if(r->contentLen != r->content.size()) // Double check that this wasn't a header-only response, probably unnecessary
|
||||
throw io_error();
|
||||
// Copy the output bytes, server could have sent more or less bytes than requested so copy at most length bytes
|
||||
|
@ -700,7 +721,7 @@ Future<std::string> BlobStoreEndpoint::beginMultiPartUpload(std::string const &b
|
|||
}
|
||||
|
||||
ACTOR Future<std::string> uploadPart_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, std::string uploadID, unsigned int partNumber, UnsentPacketQueue *pContent, int contentLen, std::string contentMD5) {
|
||||
Void _ = wait(bstore->concurrentUploads.take(1));
|
||||
Void _ = wait(bstore->concurrentUploads.take());
|
||||
state FlowLock::Releaser uploadReleaser(bstore->concurrentUploads, 1);
|
||||
|
||||
std::string resource = format("/%s/%s?partNumber=%d&uploadId=%s", bucket.c_str(), object.c_str(), partNumber, uploadID.c_str());
|
||||
|
@ -712,7 +733,7 @@ ACTOR Future<std::string> uploadPart_impl(Reference<BlobStoreEndpoint> bstore, s
|
|||
// will see error 400. That could be detected and handled gracefully by retrieving the etag for the successful request.
|
||||
|
||||
// For uploads, Blobstore returns an MD5 sum of uploaded content so check it.
|
||||
if(r->headers["Content-MD5"] != contentMD5)
|
||||
if (!r->verifyMD5(false, contentMD5))
|
||||
throw checksum_failed();
|
||||
|
||||
// No etag -> bad response.
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
#include "fdbclient/Knobs.h"
|
||||
#include "IRateControl.h"
|
||||
#include "HTTP.h"
|
||||
#include "fdbclient/json_spirit/json_spirit_writer_template.h"
|
||||
#include "JSONDoc.h"
|
||||
|
||||
// Representation of all the things you need to connect to a blob store instance with some credentials.
|
||||
// Reference counted because a very large number of them could be needed.
|
||||
|
@ -49,6 +49,7 @@ public:
|
|||
BlobKnobs();
|
||||
int connect_tries,
|
||||
connect_timeout,
|
||||
max_connection_life,
|
||||
request_tries,
|
||||
request_timeout,
|
||||
requests_per_second,
|
||||
|
@ -62,14 +63,14 @@ public:
|
|||
read_ahead_blocks,
|
||||
read_cache_blocks_per_file,
|
||||
max_send_bytes_per_second,
|
||||
max_recv_bytes_per_second,
|
||||
buckets_to_span;
|
||||
max_recv_bytes_per_second;
|
||||
bool set(StringRef name, int value);
|
||||
std::string getURLParameters() const;
|
||||
static std::vector<std::string> getKnobDescriptions() {
|
||||
return {
|
||||
"connect_tries (or ct) Number of times to try to connect for each request.",
|
||||
"connect_timeout (or cto) Number of seconds to wait for a connect request to succeed.",
|
||||
"max_connection_life (or mcl) Maximum number of seconds to use a single TCP connection.",
|
||||
"request_tries (or rt) Number of times to try each request until a parseable HTTP response other than 429 is received.",
|
||||
"request_timeout (or rto) Number of seconds to wait for a request to succeed after a connection is established.",
|
||||
"requests_per_second (or rps) Max number of requests to start per second.",
|
||||
|
@ -83,51 +84,44 @@ public:
|
|||
"read_ahead_blocks (or rab) Number of blocks to read ahead of requested offset.",
|
||||
"read_cache_blocks_per_file (or rcb) Size of the read cache for a file in blocks.",
|
||||
"max_send_bytes_per_second (or sbps) Max send bytes per second for all requests combined.",
|
||||
"max_recv_bytes_per_second (or rbps) Max receive bytes per second for all requests combined (NOT YET USED).",
|
||||
"buckets_to_span (or bts) Number of buckets that a new backup should distribute over."
|
||||
"max_recv_bytes_per_second (or rbps) Max receive bytes per second for all requests combined (NOT YET USED)."
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
BlobStoreEndpoint(std::string const &host, std::vector<NetworkAddress> const &addrs, uint16_t port, std::string const &key, std::string const &key_secret, BlobKnobs const &knobs = BlobKnobs())
|
||||
: host(host), port(port), addresses(addrs), key(key), secret(key_secret), knobs(knobs),
|
||||
BlobStoreEndpoint(std::string const &host, std::string service, std::string const &key, std::string const &key_secret, BlobKnobs const &knobs = BlobKnobs())
|
||||
: host(host), service(service), key(key), secret(key_secret), knobs(knobs),
|
||||
requestRate(new SpeedLimit(knobs.requests_per_second, 1)),
|
||||
sendRate(new SpeedLimit(knobs.max_send_bytes_per_second, 1)),
|
||||
recvRate(new SpeedLimit(knobs.max_recv_bytes_per_second, 1)),
|
||||
concurrentRequests(knobs.concurrent_requests),
|
||||
concurrentUploads(knobs.concurrent_uploads) {
|
||||
|
||||
if(addresses.size() == 0)
|
||||
if(host.empty())
|
||||
throw connection_string_invalid();
|
||||
|
||||
int perAddressLimit = std::max<int>( 1, knobs.concurrent_requests/addrs.size() );
|
||||
for(auto &addr : addrs) {
|
||||
concurrentRequestsPerAddress[addr] = Reference<FlowLock>( new FlowLock(perAddressLimit) );
|
||||
}
|
||||
}
|
||||
|
||||
static std::string getURLFormat(bool withResource = false) {
|
||||
const char *resource = "";
|
||||
if(withResource)
|
||||
resource = "<name>";
|
||||
return format("blobstore://<api_key>:<secret>@<[host,]<ip>[,<ip>]...>:<port>/%s[?<param>=<value>[&<param>=<value>]...]", resource);
|
||||
return format("blobstore://<api_key>:<secret>@<host>[:<port>]/%s[?<param>=<value>[&<param>=<value>]...]", resource);
|
||||
}
|
||||
static Reference<BlobStoreEndpoint> fromString(std::string const &url, std::string *resourceFromURL = nullptr, std::string *error = nullptr);
|
||||
|
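// --- Illustrative sketch (not part of this change): parsing a URL in the new host[:port] form. ---
// The credentials, hostname, and resource below are placeholders; request_tries is one of the knobs listed above.
static Reference<BlobStoreEndpoint> exampleFromURL() {
	std::string resource;
	return BlobStoreEndpoint::fromString(
		"blobstore://MY_KEY:MY_SECRET@backup.example.com:4433/backup-1?request_tries=10", &resource);
}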
||||
// Resolve host, and if successful replace addrs with a list of NetworkAddresses created from the resolve results.
|
||||
Future<Void> resolveHostname(bool only_if_unresolved = true);
|
||||
|
||||
// Get a normalized version of this URL with the given resource, the host and any IP addresses (possibly from DNS
|
||||
// if resolve was done) and any non-default BlobKnob values as URL parameters.
|
||||
// Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL parameters.
|
||||
std::string getResourceURL(std::string resource);
|
||||
Future<Reference<IConnection>> connect(NetworkAddress address);
|
||||
|
||||
typedef std::pair<Reference<IConnection>, double> ConnPoolEntry;
|
||||
std::map<NetworkAddress,std::list<ConnPoolEntry>> connectionPool;
|
||||
struct ReusableConnection {
|
||||
Reference<IConnection> conn;
|
||||
double expirationTime;
|
||||
};
|
||||
std::queue<ReusableConnection> connectionPool;
|
||||
Future<ReusableConnection> connect();
|
||||
void returnConnection(ReusableConnection &conn);
|
||||
|
||||
std::string host;
|
||||
uint16_t port;
|
||||
std::vector<NetworkAddress> addresses;
|
||||
std::string service;
|
||||
std::string key;
|
||||
std::string secret;
|
||||
BlobKnobs knobs;
|
||||
|
@ -136,7 +130,6 @@ public:
|
|||
Reference<IRateControl> requestRate;
|
||||
Reference<IRateControl> sendRate;
|
||||
Reference<IRateControl> recvRate;
|
||||
std::map<NetworkAddress,Reference<FlowLock>> concurrentRequestsPerAddress;
|
||||
FlowLock concurrentRequests;
|
||||
FlowLock concurrentUploads;
|
||||
|
||||
|
@ -153,18 +146,22 @@ public:
|
|||
// Every blob store interaction should ultimately go through this function
|
||||
|
||||
Future<Reference<HTTP::Response>> doRequest(std::string const &verb, std::string const &resource, const HTTP::Headers &headers, UnsentPacketQueue *pContent, int contentLen, std::set<unsigned int> successCodes);
|
||||
|
||||
struct ObjectInfo {
|
||||
std::string bucket;
|
||||
std::string name;
|
||||
int64_t size;
|
||||
};
|
||||
|
||||
struct ListResult {
|
||||
std::vector<std::string> commonPrefixes;
|
||||
std::vector<ObjectInfo> objects;
|
||||
};
|
||||
|
||||
// Get bucket contents via a stream, since listing large buckets will take many serial blob requests
|
||||
Future<Void> getBucketContentsStream(std::string const &bucket, PromiseStream<ObjectInfo> results);
|
||||
Future<Void> listBucketStream(std::string const &bucket, PromiseStream<ListResult> results, Optional<std::string> prefix = {}, Optional<char> delimiter = {});
|
||||
|
||||
// Get a list of the files in a bucket
|
||||
typedef std::vector<ObjectInfo> BucketContentsT;
|
||||
Future<BucketContentsT> getBucketContents(std::string const &bucket);
|
||||
Future<ListResult> listBucket(std::string const &bucket, Optional<std::string> prefix = {}, Optional<char> delimiter = {});
|
||||
|
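// --- Illustrative sketch (not part of this change): one-level listing under a prefix. ---
// This would live in a .actor.cpp file; the bucket and prefix names are placeholders.
ACTOR Future<Void> examplePrintListing(Reference<BlobStoreEndpoint> bstore) {
	state BlobStoreEndpoint::ListResult listing = wait(bstore->listBucket("backup-bucket", std::string("snapshots/"), '/'));
	for(auto &object : listing.objects)
		printf("%s %lld\n", object.name.c_str(), object.size);
	for(auto &dir : listing.commonPrefixes)
		printf("%s\n", dir.c_str());
	return Void();
}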
||||
// Check if an object exists in a bucket
|
||||
Future<bool> objectExists(std::string const &bucket, std::string const &object);
|
||||
|
@ -184,6 +181,9 @@ public:
|
|||
// a deletion of an object completes.
|
||||
Future<Void> deleteBucket(std::string const &bucket, int *pNumDeleted = NULL);
|
||||
|
||||
// Create a bucket if it does not already exist.
|
||||
Future<Void> createBucket(std::string const &bucket);
|
||||
|
||||
// Useful methods for working with tiny files
|
||||
Future<std::string> readEntireFile(std::string const &bucket, std::string const &object);
|
||||
Future<Void> writeEntireFile(std::string const &bucket, std::string const &object, std::string const &content);
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "md5/md5.h"
|
||||
#include "libb64/encode.h"
|
||||
#include <cctype>
|
||||
#include "xml2json.hpp"
|
||||
|
||||
namespace HTTP {
|
||||
|
||||
|
@ -59,6 +60,15 @@ namespace HTTP {
|
|||
return !fail_if_header_missing;
|
||||
}
|
||||
|
||||
void Response::convertToJSONifXML() {
|
||||
auto i = headers.find("Content-Type");
|
||||
if (i != headers.end() && i->second == "application/xml") {
|
||||
content = xml2json(content.c_str());
|
||||
contentLen = content.length();
|
||||
headers["Content-Type"] = "application/json";
|
||||
}
|
||||
}
|
||||
|
||||
std::string Response::toString() {
|
||||
std::string r = format("Response code: %d\n", code);
|
||||
r += format("ContentLen: %lld\n", contentLen);
|
||||
|
@ -324,7 +334,7 @@ namespace HTTP {
|
|||
}
|
||||
|
||||
state Reference<HTTP::Response> r(new HTTP::Response());
|
||||
Void _ = wait(r->read(conn, verb == "HEAD"));
|
||||
Void _ = wait(r->read(conn, verb == "HEAD" || verb == "DELETE"));
|
||||
double elapsed = timer() - send_start;
|
||||
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0)
|
||||
printf("[%s] HTTP code=%d, time=%fs %s %s [%u out, response content len %d]\n", conn->getDebugID().toString().c_str(), r->code, elapsed, verb.c_str(), resource.c_str(), (int)total_sent, (int)r->contentLen);
|
||||
|
|
|
@ -39,6 +39,7 @@ namespace HTTP {
|
|||
int64_t contentLen;
|
||||
|
||||
bool verifyMD5(bool fail_if_header_missing, Optional<std::string> content_sum = Optional<std::string>());
|
||||
void convertToJSONifXML();
|
||||
};
|
||||
|
||||
// Prepend the HTTP request header to the given PacketBuffer, returning the new head of the buffer chain
|
||||
|
|
|
@ -0,0 +1,310 @@
|
|||
/*
|
||||
* JSONDoc.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/json_spirit/json_spirit_writer_template.h"
|
||||
#include "fdbclient/json_spirit/json_spirit_reader_template.h"
|
||||
|
||||
// JSONDoc is a convenient reader/writer class for manipulating JSON documents using "paths".
|
||||
// Access is done using a "path", which is a string of dot-separated
|
||||
// substrings representing successively deeper keys found in nested
|
||||
// JSON objects within the top level object
|
||||
//
|
||||
// Most methods are read-only with respect to the source JSON object.
|
||||
// The only modifying methods are create(), put(), subDoc(), and mergeInto()
|
||||
//
|
||||
// JSONDoc maintains some state which is the JSON value that was found during the most recent
|
||||
// *successful* path lookup.
|
||||
//
|
||||
// Examples:
|
||||
// JSONDoc r(some_obj);
|
||||
//
|
||||
// // See if JSON doc path a.b.c exists
|
||||
// bool exists = r.has("a.b.c");
|
||||
//
|
||||
// // See if JSON doc path a.b.c exists, if it does then assign value to x. Throws if path exists but T is not compatible.
|
||||
// T x;
|
||||
// bool exists = r.has("a.b.c", x);
|
||||
//
|
||||
// // This way you can chain things like this:
|
||||
// bool is_two = r.has("a.b.c", x) && x == 2;
|
||||
//
|
||||
// // Alternatively, you can avoid the temp var by making use of the last() method which returns a reference
|
||||
// // to the JSON value at the last successfully found path that has() has seen.
|
||||
// bool is_int = r.has("a.b.c") && r.last().type == json_spirit::int_type;
|
||||
// bool is_two = r.has("a.b.c") && r.last().get_int() == 2;
|
||||
//
|
||||
// // The familiar at() method also exists but now supports the same path concept.
|
||||
// // It will throw in the same circumstances as the original method
|
||||
// int x = r.at("a.b.c").get_int();
|
||||
//
|
||||
// // If you wish to access an element with the dot character within its name (e.g., "hostname.example.com"),
|
||||
// // you can do so by setting the "split" flag to false in either the "has" or "get" methods. The example
|
||||
// // below will look for the key "hostname.example.com" as a subkey of the path "a.b.c" (or, more
|
||||
// // precisely, it will look to see if r.has("a").has("b").has("c").has("hostname.example.com", false)).
|
||||
// bool exists = r.has("a.b.c").has("hostname.example.com", false);
|
||||
//
|
||||
// // And the familiar operator[] interface exists as well, however only as a synonym for at()
|
||||
// // because this class is only for reading. Using operator [] will not auto-create null things.
|
||||
// // The following would throw if a.b.c did not exist, or if it was not an int.
|
||||
// int x = r["a.b.c"].get_int();
|
||||
struct JSONDoc {
|
||||
JSONDoc() : pObj(NULL) {}
|
||||
|
||||
// Construction from const json_spirit::mObject, trivial and will never throw.
|
||||
// Resulting JSONDoc will not allow modifications.
|
||||
JSONDoc(const json_spirit::mObject &o) : pObj(&o), wpObj(NULL) {}
|
||||
|
||||
// Construction from json_spirit::mObject. Allows modifications.
|
||||
JSONDoc(json_spirit::mObject &o) : pObj(&o), wpObj(&o) {}
|
||||
|
||||
// Construction from const json_spirit::mValue (which is a Variant type) which will try to
|
||||
// convert it to an mObject. This will throw if that fails, just as it would
|
||||
// if the caller called get_obj() itself and used the previous constructor instead.
|
||||
JSONDoc(const json_spirit::mValue &v) : pObj(&v.get_obj()), wpObj(NULL) {}
|
||||
|
||||
// Construction from non-const json_spirit::mValue - will convert the mValue to
|
||||
// an object if it isn't already and then attach to it.
|
||||
JSONDoc(json_spirit::mValue &v) {
|
||||
if(v.type() != json_spirit::obj_type)
|
||||
v = json_spirit::mObject();
|
||||
wpObj = &v.get_obj();
|
||||
pObj = wpObj;
|
||||
}
|
||||
|
||||
	// Returns whether or not a "path" exists.
	// Returns true if all elements along path exist
	// Returns false if any elements along the path are MISSING
	// Will throw if a non-terminating path element exists BUT is not a JSON Object.
	// If the "split" flag is set to "false", then the path is not split on the "dot"
	// character and is instead treated as a single key.
	// When a path is found, pLast is updated.
|
||||
bool has(std::string path, bool split=true) {
|
||||
if (pObj == NULL)
|
||||
return false;
|
||||
|
||||
if (path.empty())
|
||||
return false;
|
||||
size_t start = 0;
|
||||
const json_spirit::mValue *curVal = NULL;
|
||||
while (start < path.size())
|
||||
{
|
||||
// If a path segment is found then curVal must be an object
|
||||
size_t dot;
|
||||
if (split) {
|
||||
dot = path.find_first_of('.', start);
|
||||
if (dot == std::string::npos)
|
||||
dot = path.size();
|
||||
} else {
|
||||
dot = path.size();
|
||||
}
|
||||
std::string key = path.substr(start, dot - start);
|
||||
|
||||
// Get pointer to the current Object that the key has to be in
|
||||
// This will throw if the value is not an Object
|
||||
const json_spirit::mObject *curObj = curVal ? &curVal->get_obj() : pObj;
|
||||
|
||||
// Make sure key exists, if not then return false
|
||||
if (!curObj->count(key))
|
||||
return false;
|
||||
|
||||
// Advance curVal
|
||||
curVal = &curObj->at(key);
|
||||
|
||||
// Advance start position in path
|
||||
start = dot + 1;
|
||||
}
|
||||
|
||||
pLast = curVal;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Creates the given path (forcing Objects to exist along its depth, replacing whatever else might have been there)
|
||||
// and returns a reference to the Value at that location.
|
||||
json_spirit::mValue & create(std::string path, bool split=true) {
|
||||
if (wpObj == NULL || path.empty())
|
||||
throw std::runtime_error("JSON Object not writable or bad JSON path");
|
||||
|
||||
size_t start = 0;
|
||||
json_spirit::mValue *curVal = nullptr;
|
||||
while (start < path.size())
|
||||
{
|
||||
// Get next path segment name
|
||||
size_t dot;
|
||||
if (split) {
|
||||
dot = path.find_first_of('.', start);
|
||||
if (dot == std::string::npos)
|
||||
dot = path.size();
|
||||
} else {
|
||||
dot = path.size();
|
||||
}
|
||||
std::string key = path.substr(start, dot - start);
|
||||
if(key.empty())
|
||||
throw std::runtime_error("invalid JSON path");
|
||||
|
||||
// Get/create pointer to the current Object that the key has to be in
|
||||
// If curVal is defined then force it to be an Object
|
||||
json_spirit::mObject *curObj;
|
||||
if(curVal != nullptr) {
|
||||
if(curVal->type() != json_spirit::obj_type)
|
||||
*curVal = json_spirit::mObject();
|
||||
curObj = &curVal->get_obj();
|
||||
}
|
||||
else // Otherwise start with the object *this is writing to
|
||||
curObj = wpObj;
|
||||
|
||||
			// If the key doesn't exist yet, create it with a null value
			if (!curObj->count(key))
				(*curObj)[key] = json_spirit::mValue();
|
||||
|
||||
// Advance curVal
|
||||
curVal = &((*curObj)[key]);
|
||||
|
||||
// Advance start position in path
|
||||
start = dot + 1;
|
||||
}
|
||||
|
||||
return *curVal;
|
||||
}
|
||||
|
||||
// Creates the path given, puts a value at it, and returns a reference to the value
|
||||
template<typename T>
|
||||
T & put(std::string path, const T & value, bool split=true) {
|
||||
json_spirit::mValue &v = create(path, split);
|
||||
v = value;
|
||||
return v.get_value<T>();
|
||||
}
|
||||
|
||||
	// Ensures that an Object exists at path and returns a JSONDoc that writes to it.
|
||||
JSONDoc subDoc(std::string path, bool split=true) {
|
||||
json_spirit::mValue &v = create(path, split);
|
||||
if(v.type() != json_spirit::obj_type)
|
||||
v = json_spirit::mObject();
|
||||
return JSONDoc(v.get_obj());
|
||||
}
|
||||
|
||||
// Apply a merge operation to two values. Works for int, double, and string
|
||||
template <typename T>
|
||||
static json_spirit::mObject mergeOperator(const std::string &op, const json_spirit::mObject &op_a, const json_spirit::mObject &op_b, T const &a, T const &b) {
|
||||
if(op == "$max")
|
||||
return {{op, std::max<T>(a, b)}};
|
||||
if(op == "$min")
|
||||
return {{op, std::min<T>(a, b)}};
|
||||
if(op == "$sum")
|
||||
return {{op, a + b}};
|
||||
throw std::exception();
|
||||
}
|
||||
|
||||
// This is just a convenience function to make calling mergeOperator look cleaner
|
||||
template <typename T>
|
||||
static json_spirit::mObject mergeOperatorWrapper(const std::string &op, const json_spirit::mObject &op_a, const json_spirit::mObject &op_b, const json_spirit::mValue &a, const json_spirit::mValue &b) {
|
||||
return mergeOperator<T>(op, op_a, op_b, a.get_value<T>(), b.get_value<T>());
|
||||
}
|
||||
|
||||
static inline std::string getOperator(const json_spirit::mObject &obj) {
|
||||
for(auto &k : obj)
|
||||
if(!k.first.empty() && k.first[0] == '$')
|
||||
return k.first;
|
||||
return std::string();
|
||||
}
|
||||
|
||||
// Merge src into dest, applying merge operators
|
||||
static void mergeInto(json_spirit::mObject &dst, const json_spirit::mObject &src);
|
||||
static void mergeValueInto(json_spirit::mValue &d, const json_spirit::mValue &s);
|
||||
|
||||
// Remove any merge operators that never met any mates.
|
||||
static void cleanOps(json_spirit::mObject &obj);
|
||||
void cleanOps() {
|
||||
if(wpObj == nullptr)
|
||||
throw std::runtime_error("JSON Object not writable");
|
||||
|
||||
return cleanOps(*wpObj);
|
||||
}
|
||||
|
||||
void absorb(const JSONDoc &doc) {
|
||||
if(wpObj == nullptr)
|
||||
throw std::runtime_error("JSON Object not writable");
|
||||
|
||||
if(doc.pObj == nullptr)
|
||||
throw std::runtime_error("JSON Object not readable");
|
||||
|
||||
mergeInto(*wpObj, *doc.pObj);
|
||||
}
|
||||
|
||||
// Returns whether or not a "path" exists.
|
||||
// Returns true if all elements along path exist
|
||||
// Returns false if any elements along the path are MISSING
|
||||
// Sets out to the value of the thing that path refers to
|
||||
// Will throw if a non-terminating path element exists BUT is not a JSON Object.
|
||||
// Will throw if all elements along path exists but T is an incompatible type
|
||||
template <typename T> bool get(const std::string path, T &out, bool split=true) {
|
||||
bool r = has(path, split);
|
||||
if (r)
|
||||
out = pLast->get_value<T>();
|
||||
return r;
|
||||
}
|
||||
|
||||
// For convenience, wraps get() in a try/catch and returns false UNLESS the path existed and was a compatible type.
|
||||
template <typename T> bool tryGet(const std::string path, T &out, bool split=true) {
|
||||
try { return get(path, out, split); } catch(...) {}
|
||||
return false;
|
||||
}
|
||||
|
||||
const json_spirit::mValue & at(const std::string path, bool split=true) {
|
||||
if (has(path, split))
|
||||
return last();
|
||||
throw std::runtime_error("JSON path doesn't exist");
|
||||
}
|
||||
|
||||
const json_spirit::mValue & operator[](const std::string path) {
|
||||
return at(path);
|
||||
}
|
||||
|
||||
const json_spirit::mValue & last() const { return *pLast; }
|
||||
bool valid() const { return pObj != NULL; }
|
||||
|
||||
const json_spirit::mObject & obj() {
|
||||
// This dummy object is necessary to make working with obj() easier when this does not currently
|
||||
// point to a valid mObject. valid() can be called to explicitly check for this scenario, but
|
||||
// calling obj() at least will not seg fault and instead return a const reference to an empty mObject.
|
||||
// This is very useful when iterating using obj() to access the underlying mObject.
|
||||
static const json_spirit::mObject dummy;
|
||||
return pObj ? *pObj : dummy;
|
||||
}
|
||||
|
||||
// Return reference to writeable underlying mObject but only if *this was initialized with a writeable value or object
|
||||
json_spirit::mObject & wobj() {
|
||||
ASSERT(wpObj != nullptr);
|
||||
return *wpObj;
|
||||
}
|
||||
|
||||
// This is the version used to represent 'now' for use by the $expires operator.
|
||||
// By default, nothing will expire and it is up to the user of JSONDoc to update this value if
|
||||
// it is intended to be used.
|
||||
// This is slightly hackish but otherwise the JSON merge functions would require a Transaction.
|
||||
static uint64_t expires_reference_version;
|
||||
private:
|
||||
const json_spirit::mObject *pObj;
|
||||
// Writeable pointer to the same object. Will be NULL if initialized from a const object.
|
||||
json_spirit::mObject *wpObj;
|
||||
const json_spirit::mValue *pLast;
|
||||
};
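
// Editorial usage sketch, not part of the original header. The JSON text and the field
// names are made up; json_spirit::read_string comes from the reader template header
// included at the top of this file.
inline bool JSONDocExampleUsage() {
	json_spirit::mValue root;
	json_spirit::read_string(std::string("{\"cluster\":{\"workload\":{\"tps\":1000}}}"), root);

	JSONDoc doc(root);                                      // non-const mValue, so writable

	int tps = 0;
	bool found = doc.tryGet("cluster.workload.tps", tps);   // false if missing or wrong type

	doc.put("cluster.workload.operations", 42);             // creates intermediate objects as needed
	doc.subDoc("cluster.layers").put("backup_ok", true);    // subDoc() forces an Object at the path

	return found && tps == 1000 && doc.has("cluster.workload.operations");
}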
@ -173,6 +173,10 @@ Future<Reference<IConnection>> TLSNetworkConnections::connect( NetworkAddress to
|
|||
return network->connect( toAddr );
|
||||
}
|
||||
|
||||
Future<std::vector<NetworkAddress>> TLSNetworkConnections::resolveTCPEndpoint( std::string host, std::string service) {
|
||||
return network->resolveTCPEndpoint( host, service );
|
||||
}
|
||||
|
||||
Reference<IListener> TLSNetworkConnections::listen( NetworkAddress localAddr ) {
|
||||
if ( localAddr.isTLS() ) {
|
||||
NetworkAddress clearAddr( localAddr.ip, localAddr.port, localAddr.isPublic(), false );
|
||||
|
|
|
@ -109,6 +109,7 @@ struct TLSNetworkConnections : INetworkConnections {
|
|||
explicit TLSNetworkConnections( Reference<TLSOptions> options );
|
||||
|
||||
virtual Future<Reference<IConnection>> connect( NetworkAddress toAddr );
|
||||
virtual Future<std::vector<NetworkAddress>> resolveTCPEndpoint( std::string host, std::string service);
|
||||
|
||||
virtual Reference<IListener> listen( NetworkAddress localAddr );
|
||||
|
||||
|
|
|
@ -0,0 +1,271 @@
|
|||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ALLOCATORS_H_
|
||||
#define RAPIDJSON_ALLOCATORS_H_
|
||||
|
||||
#include "rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Allocator
|
||||
|
||||
/*! \class rapidjson::Allocator
|
||||
\brief Concept for allocating, resizing and freeing memory block.
|
||||
|
||||
Note that Malloc() and Realloc() are non-static but Free() is static.
|
||||
|
||||
    So if an allocator needs to support Free(), it needs to put its pointer in
    the header of the memory block.
|
||||
|
||||
\code
|
||||
concept Allocator {
|
||||
static const bool kNeedFree; //!< Whether this allocator needs to call Free().
|
||||
|
||||
// Allocate a memory block.
|
||||
// \param size of the memory block in bytes.
|
||||
// \returns pointer to the memory block.
|
||||
void* Malloc(size_t size);
|
||||
|
||||
// Resize a memory block.
|
||||
// \param originalPtr The pointer to current memory block. Null pointer is permitted.
|
||||
// \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.)
|
||||
// \param newSize the new size in bytes.
|
||||
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);
|
||||
|
||||
// Free a memory block.
|
||||
// \param pointer to the memory block. Null pointer is permitted.
|
||||
static void Free(void *ptr);
|
||||
};
|
||||
\endcode
|
||||
*/
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// CrtAllocator
|
||||
|
||||
//! C-runtime library allocator.
|
||||
/*! This class is just a wrapper for the standard C library memory routines.
|
||||
\note implements Allocator concept
|
||||
*/
|
||||
class CrtAllocator {
|
||||
public:
|
||||
static const bool kNeedFree = true;
|
||||
void* Malloc(size_t size) {
|
||||
if (size) // behavior of malloc(0) is implementation defined.
|
||||
return std::malloc(size);
|
||||
else
|
||||
return NULL; // standardize to returning NULL.
|
||||
}
|
||||
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
|
||||
(void)originalSize;
|
||||
if (newSize == 0) {
|
||||
std::free(originalPtr);
|
||||
return NULL;
|
||||
}
|
||||
return std::realloc(originalPtr, newSize);
|
||||
}
|
||||
static void Free(void *ptr) { std::free(ptr); }
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// MemoryPoolAllocator
|
||||
|
||||
//! Default memory allocator used by the parser and DOM.
|
||||
/*! This allocator allocates memory blocks from pre-allocated memory chunks.

    It does not free memory blocks, and Realloc() only allocates new memory.

    The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default.

    The user may also supply a buffer as the first chunk.

    If the user buffer is full then additional chunks are allocated by BaseAllocator.

    The user buffer is not deallocated by this allocator.

    \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator.
    \note implements Allocator concept
|
||||
*/
|
||||
template <typename BaseAllocator = CrtAllocator>
|
||||
class MemoryPoolAllocator {
|
||||
public:
|
||||
static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator)
|
||||
|
||||
//! Constructor with chunkSize.
|
||||
    /*! \param chunkSize The size of each memory chunk. The default is kDefaultChunkCapacity.
|
||||
\param baseAllocator The allocator for allocating memory chunks.
|
||||
*/
|
||||
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
|
||||
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
|
||||
{
|
||||
}
|
||||
|
||||
//! Constructor with user-supplied buffer.
|
||||
    /*! The user buffer will be used first. When it is full, the memory pool allocates a new chunk of chunkSize bytes.

        The user buffer will not be deallocated when this allocator is destructed.

        \param buffer User supplied buffer.
        \param size Size of the buffer in bytes. It must be larger than sizeof(ChunkHeader).
        \param chunkSize The size of each memory chunk. The default is kDefaultChunkCapacity.
        \param baseAllocator The allocator for allocating memory chunks.
|
||||
*/
|
||||
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
|
||||
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
|
||||
{
|
||||
RAPIDJSON_ASSERT(buffer != 0);
|
||||
RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
|
||||
chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
|
||||
chunkHead_->capacity = size - sizeof(ChunkHeader);
|
||||
chunkHead_->size = 0;
|
||||
chunkHead_->next = 0;
|
||||
}
|
||||
|
||||
//! Destructor.
|
||||
/*! This deallocates all memory chunks, excluding the user-supplied buffer.
|
||||
*/
|
||||
~MemoryPoolAllocator() {
|
||||
Clear();
|
||||
RAPIDJSON_DELETE(ownBaseAllocator_);
|
||||
}
|
||||
|
||||
//! Deallocates all memory chunks, excluding the user-supplied buffer.
|
||||
void Clear() {
|
||||
while (chunkHead_ && chunkHead_ != userBuffer_) {
|
||||
ChunkHeader* next = chunkHead_->next;
|
||||
baseAllocator_->Free(chunkHead_);
|
||||
chunkHead_ = next;
|
||||
}
|
||||
if (chunkHead_ && chunkHead_ == userBuffer_)
|
||||
chunkHead_->size = 0; // Clear user buffer
|
||||
}
|
||||
|
||||
//! Computes the total capacity of allocated memory chunks.
|
||||
/*! \return total capacity in bytes.
|
||||
*/
|
||||
size_t Capacity() const {
|
||||
size_t capacity = 0;
|
||||
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
|
||||
capacity += c->capacity;
|
||||
return capacity;
|
||||
}
|
||||
|
||||
//! Computes the memory blocks allocated.
|
||||
/*! \return total used bytes.
|
||||
*/
|
||||
size_t Size() const {
|
||||
size_t size = 0;
|
||||
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
|
||||
size += c->size;
|
||||
return size;
|
||||
}
|
||||
|
||||
//! Allocates a memory block. (concept Allocator)
|
||||
void* Malloc(size_t size) {
|
||||
if (!size)
|
||||
return NULL;
|
||||
|
||||
size = RAPIDJSON_ALIGN(size);
|
||||
if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
|
||||
if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
|
||||
return NULL;
|
||||
|
||||
void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
|
||||
chunkHead_->size += size;
|
||||
return buffer;
|
||||
}
|
||||
|
||||
//! Resizes a memory block (concept Allocator)
|
||||
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
|
||||
if (originalPtr == 0)
|
||||
return Malloc(newSize);
|
||||
|
||||
if (newSize == 0)
|
||||
return NULL;
|
||||
|
||||
originalSize = RAPIDJSON_ALIGN(originalSize);
|
||||
newSize = RAPIDJSON_ALIGN(newSize);
|
||||
|
||||
// Do not shrink if new size is smaller than original
|
||||
if (originalSize >= newSize)
|
||||
return originalPtr;
|
||||
|
||||
// Simply expand it if it is the last allocation and there is sufficient space
|
||||
if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
|
||||
size_t increment = static_cast<size_t>(newSize - originalSize);
|
||||
if (chunkHead_->size + increment <= chunkHead_->capacity) {
|
||||
chunkHead_->size += increment;
|
||||
return originalPtr;
|
||||
}
|
||||
}
|
||||
|
||||
// Realloc process: allocate and copy memory, do not free original buffer.
|
||||
if (void* newBuffer = Malloc(newSize)) {
|
||||
if (originalSize)
|
||||
std::memcpy(newBuffer, originalPtr, originalSize);
|
||||
return newBuffer;
|
||||
}
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
//! Frees a memory block (concept Allocator)
|
||||
static void Free(void *ptr) { (void)ptr; } // Do nothing
|
||||
|
||||
private:
|
||||
//! Copy constructor is not permitted.
|
||||
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */;
|
||||
//! Copy assignment operator is not permitted.
|
||||
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */;
|
||||
|
||||
//! Creates a new chunk.
|
||||
/*! \param capacity Capacity of the chunk in bytes.
|
||||
\return true if success.
|
||||
*/
|
||||
bool AddChunk(size_t capacity) {
|
||||
if (!baseAllocator_)
|
||||
ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator());
|
||||
if (ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) {
|
||||
chunk->capacity = capacity;
|
||||
chunk->size = 0;
|
||||
chunk->next = chunkHead_;
|
||||
chunkHead_ = chunk;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity.
|
||||
|
||||
    //! Chunk header prepended to each chunk.
|
||||
/*! Chunks are stored as a singly linked list.
|
||||
*/
|
||||
struct ChunkHeader {
|
||||
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
|
||||
size_t size; //!< Current size of allocated memory in bytes.
|
||||
ChunkHeader *next; //!< Next chunk in the linked list.
|
||||
};
|
||||
|
||||
ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
|
||||
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
|
||||
void *userBuffer_; //!< User supplied buffer.
|
||||
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
|
||||
BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object.
|
||||
};
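
// Editorial usage sketch, not part of the original header: a MemoryPoolAllocator backed
// by a user-supplied buffer, as described in the class documentation above. Allocations
// are carved out of stackBuffer first; once it is exhausted, further chunks come from the
// BaseAllocator (CrtAllocator by default). Free() is a no-op, so memory is reclaimed only
// by Clear() or the destructor.
inline void MemoryPoolAllocatorExampleUsage() {
    char stackBuffer[1024];
    MemoryPoolAllocator<> allocator(stackBuffer, sizeof(stackBuffer));

    void* p = allocator.Malloc(100);            // served from stackBuffer
    p = allocator.Realloc(p, 100, 200);         // grows in place when p is the last allocation
    MemoryPoolAllocator<>::Free(p);             // no-op by design

    size_t used = allocator.Size();             // bytes handed out so far
    size_t total = allocator.Capacity();        // total capacity of all chunks
    (void)used; (void)total;

    allocator.Clear();                          // frees extra chunks, resets the user buffer
}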
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_ALLOCATORS_H_
|
File diff suppressed because it is too large
|
@ -0,0 +1,299 @@
|
|||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ENCODEDSTREAM_H_
|
||||
#define RAPIDJSON_ENCODEDSTREAM_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include "memorystream.h"
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Input byte stream wrapper with a statically bound encoding.
|
||||
/*!
|
||||
\tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
|
||||
\tparam InputByteStream Type of input byte stream. For example, FileReadStream.
|
||||
*/
|
||||
template <typename Encoding, typename InputByteStream>
|
||||
class EncodedInputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef typename Encoding::Ch Ch;
|
||||
|
||||
EncodedInputStream(InputByteStream& is) : is_(is) {
|
||||
current_ = Encoding::TakeBOM(is_);
|
||||
}
|
||||
|
||||
Ch Peek() const { return current_; }
|
||||
Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; }
|
||||
size_t Tell() const { return is_.Tell(); }
|
||||
|
||||
// Not implemented
|
||||
void Put(Ch) { RAPIDJSON_ASSERT(false); }
|
||||
void Flush() { RAPIDJSON_ASSERT(false); }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
EncodedInputStream(const EncodedInputStream&);
|
||||
EncodedInputStream& operator=(const EncodedInputStream&);
|
||||
|
||||
InputByteStream& is_;
|
||||
Ch current_;
|
||||
};
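
// Editorial usage sketch, not part of the original header: wrap a byte stream known to
// hold UTF-16LE text so the caller sees decoded 16-bit code units. The bytes below are a
// UTF-16LE BOM (skipped by TakeBOM()) followed by "ok".
inline void EncodedInputStreamExampleUsage() {
    const char bytes[] = { '\xFF', '\xFE', 'o', '\0', 'k', '\0' };
    MemoryStream byteStream(bytes, sizeof(bytes));
    EncodedInputStream<UTF16LE<>, MemoryStream> wide(byteStream);

    while (wide.Peek() != 0)
        (void)wide.Take();     // yields the code units 'o' then 'k'
}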
|
||||
|
||||
//! Specialized for UTF8 MemoryStream.
|
||||
template <>
|
||||
class EncodedInputStream<UTF8<>, MemoryStream> {
|
||||
public:
|
||||
typedef UTF8<>::Ch Ch;
|
||||
|
||||
EncodedInputStream(MemoryStream& is) : is_(is) {
|
||||
if (static_cast<unsigned char>(is_.Peek()) == 0xEFu) is_.Take();
|
||||
if (static_cast<unsigned char>(is_.Peek()) == 0xBBu) is_.Take();
|
||||
if (static_cast<unsigned char>(is_.Peek()) == 0xBFu) is_.Take();
|
||||
}
|
||||
Ch Peek() const { return is_.Peek(); }
|
||||
Ch Take() { return is_.Take(); }
|
||||
size_t Tell() const { return is_.Tell(); }
|
||||
|
||||
// Not implemented
|
||||
void Put(Ch) {}
|
||||
void Flush() {}
|
||||
Ch* PutBegin() { return 0; }
|
||||
size_t PutEnd(Ch*) { return 0; }
|
||||
|
||||
MemoryStream& is_;
|
||||
|
||||
private:
|
||||
EncodedInputStream(const EncodedInputStream&);
|
||||
EncodedInputStream& operator=(const EncodedInputStream&);
|
||||
};
|
||||
|
||||
//! Output byte stream wrapper with statically bound encoding.
|
||||
/*!
|
||||
\tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
|
||||
    \tparam OutputByteStream Type of output byte stream. For example, FileWriteStream.
|
||||
*/
|
||||
template <typename Encoding, typename OutputByteStream>
|
||||
class EncodedOutputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef typename Encoding::Ch Ch;
|
||||
|
||||
EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) {
|
||||
if (putBOM)
|
||||
Encoding::PutBOM(os_);
|
||||
}
|
||||
|
||||
void Put(Ch c) { Encoding::Put(os_, c); }
|
||||
void Flush() { os_.Flush(); }
|
||||
|
||||
// Not implemented
|
||||
Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;}
|
||||
Ch Take() { RAPIDJSON_ASSERT(false); return 0;}
|
||||
size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
EncodedOutputStream(const EncodedOutputStream&);
|
||||
EncodedOutputStream& operator=(const EncodedOutputStream&);
|
||||
|
||||
OutputByteStream& os_;
|
||||
};
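
// Editorial usage sketch, not part of the original header: write UTF-16LE output, BOM
// first, through any byte-oriented output stream. A minimal in-memory byte sink is
// defined locally so the example does not depend on other stream headers.
struct ExampleByteSink {
    typedef char Ch;
    char bytes[64];
    size_t length;
    ExampleByteSink() : length(0) {}
    void Put(char c) { if (length < sizeof(bytes)) bytes[length++] = c; }
    void Flush() {}
};

inline void EncodedOutputStreamExampleUsage() {
    ExampleByteSink sink;
    EncodedOutputStream<UTF16LE<>, ExampleByteSink> wide(sink, /* putBOM = */ true);
    wide.Put(L'o');
    wide.Put(L'k');
    wide.Flush();
    // sink.bytes now holds FF FE 6F 00 6B 00: the BOM, then "ok" in UTF-16LE.
}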
|
||||
|
||||
#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x
|
||||
|
||||
//! Input stream wrapper with dynamically bound encoding and automatic encoding detection.
|
||||
/*!
|
||||
\tparam CharType Type of character for reading.
|
||||
\tparam InputByteStream type of input byte stream to be wrapped.
|
||||
*/
|
||||
template <typename CharType, typename InputByteStream>
|
||||
class AutoUTFInputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef CharType Ch;
|
||||
|
||||
//! Constructor.
|
||||
/*!
|
||||
\param is input stream to be wrapped.
|
||||
\param type UTF encoding type if it is not detected from the stream.
|
||||
*/
|
||||
AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) {
|
||||
RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
|
||||
DetectType();
|
||||
static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) };
|
||||
takeFunc_ = f[type_];
|
||||
current_ = takeFunc_(*is_);
|
||||
}
|
||||
|
||||
UTFType GetType() const { return type_; }
|
||||
bool HasBOM() const { return hasBOM_; }
|
||||
|
||||
Ch Peek() const { return current_; }
|
||||
Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; }
|
||||
size_t Tell() const { return is_->Tell(); }
|
||||
|
||||
// Not implemented
|
||||
void Put(Ch) { RAPIDJSON_ASSERT(false); }
|
||||
void Flush() { RAPIDJSON_ASSERT(false); }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
AutoUTFInputStream(const AutoUTFInputStream&);
|
||||
AutoUTFInputStream& operator=(const AutoUTFInputStream&);
|
||||
|
||||
// Detect encoding type with BOM or RFC 4627
|
||||
void DetectType() {
|
||||
// BOM (Byte Order Mark):
|
||||
// 00 00 FE FF UTF-32BE
|
||||
// FF FE 00 00 UTF-32LE
|
||||
// FE FF UTF-16BE
|
||||
// FF FE UTF-16LE
|
||||
// EF BB BF UTF-8
|
||||
|
||||
const unsigned char* c = reinterpret_cast<const unsigned char *>(is_->Peek4());
|
||||
if (!c)
|
||||
return;
|
||||
|
||||
unsigned bom = static_cast<unsigned>(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24));
|
||||
hasBOM_ = false;
|
||||
if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
|
||||
else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
|
||||
else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); }
|
||||
else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); }
|
||||
else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); }
|
||||
|
||||
// RFC 4627: Section 3
|
||||
// "Since the first two characters of a JSON text will always be ASCII
|
||||
// characters [RFC0020], it is possible to determine whether an octet
|
||||
// stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
|
||||
// at the pattern of nulls in the first four octets."
|
||||
// 00 00 00 xx UTF-32BE
|
||||
// 00 xx 00 xx UTF-16BE
|
||||
// xx 00 00 00 UTF-32LE
|
||||
// xx 00 xx 00 UTF-16LE
|
||||
// xx xx xx xx UTF-8
|
||||
|
||||
if (!hasBOM_) {
|
||||
unsigned pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0);
|
||||
switch (pattern) {
|
||||
case 0x08: type_ = kUTF32BE; break;
|
||||
case 0x0A: type_ = kUTF16BE; break;
|
||||
case 0x01: type_ = kUTF32LE; break;
|
||||
case 0x05: type_ = kUTF16LE; break;
|
||||
case 0x0F: type_ = kUTF8; break;
|
||||
default: break; // Use type defined by user.
|
||||
}
|
||||
}
|
||||
|
||||
        // Runtime check whether the size of the character type is sufficient. It only performs checks with assertions.
|
||||
if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
|
||||
if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
|
||||
}
|
||||
|
||||
typedef Ch (*TakeFunc)(InputByteStream& is);
|
||||
InputByteStream* is_;
|
||||
UTFType type_;
|
||||
Ch current_;
|
||||
TakeFunc takeFunc_;
|
||||
bool hasBOM_;
|
||||
};
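
// Editorial usage sketch, not part of the original header: let AutoUTFInputStream detect
// the encoding of raw JSON bytes from the BOM (or from the RFC 4627 null-byte pattern)
// and hand back code units as a uniform 32-bit type, whatever the source encoding was.
inline void AutoUTFInputStreamExampleUsage() {
    const char utf8Json[] = "\xEF\xBB\xBF{\"ok\":true}";       // UTF-8 text with a BOM
    MemoryStream byteStream(utf8Json, sizeof(utf8Json) - 1);
    AutoUTFInputStream<unsigned, MemoryStream> in(byteStream); // kUTF8 is only the fallback

    UTFType detected = in.GetType();   // kUTF8 here, detected from the BOM
    bool sawBOM = in.HasBOM();         // true
    (void)detected; (void)sawBOM;

    while (in.Peek() != 0)
        (void)in.Take();               // code units of the detected encoding
}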
|
||||
|
||||
//! Output stream wrapper with dynamically bound encoding and automatic encoding detection.
|
||||
/*!
|
||||
\tparam CharType Type of character for writing.
|
||||
\tparam OutputByteStream type of output byte stream to be wrapped.
|
||||
*/
|
||||
template <typename CharType, typename OutputByteStream>
|
||||
class AutoUTFOutputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef CharType Ch;
|
||||
|
||||
//! Constructor.
|
||||
/*!
|
||||
\param os output stream to be wrapped.
|
||||
\param type UTF encoding type.
|
||||
\param putBOM Whether to write BOM at the beginning of the stream.
|
||||
*/
|
||||
AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) {
|
||||
RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
|
||||
|
||||
        // Runtime check whether the size of the character type is sufficient. It only performs checks with assertions.
|
||||
if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
|
||||
if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
|
||||
|
||||
static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) };
|
||||
putFunc_ = f[type_];
|
||||
|
||||
if (putBOM)
|
||||
PutBOM();
|
||||
}
|
||||
|
||||
UTFType GetType() const { return type_; }
|
||||
|
||||
void Put(Ch c) { putFunc_(*os_, c); }
|
||||
void Flush() { os_->Flush(); }
|
||||
|
||||
// Not implemented
|
||||
Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;}
|
||||
Ch Take() { RAPIDJSON_ASSERT(false); return 0;}
|
||||
size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
AutoUTFOutputStream(const AutoUTFOutputStream&);
|
||||
AutoUTFOutputStream& operator=(const AutoUTFOutputStream&);
|
||||
|
||||
void PutBOM() {
|
||||
typedef void (*PutBOMFunc)(OutputByteStream&);
|
||||
static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) };
|
||||
f[type_](*os_);
|
||||
}
|
||||
|
||||
typedef void (*PutFunc)(OutputByteStream&, Ch);
|
||||
|
||||
OutputByteStream* os_;
|
||||
UTFType type_;
|
||||
PutFunc putFunc_;
|
||||
};
|
||||
|
||||
#undef RAPIDJSON_ENCODINGS_FUNC
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_ENCODEDSTREAM_H_
|
|
@ -0,0 +1,716 @@
|
|||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ENCODINGS_H_
|
||||
#define RAPIDJSON_ENCODINGS_H_
|
||||
|
||||
#include "rapidjson.h"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data
|
||||
RAPIDJSON_DIAG_OFF(4702) // unreachable code
|
||||
#elif defined(__GNUC__)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
RAPIDJSON_DIAG_OFF(overflow)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Encoding
|
||||
|
||||
/*! \class rapidjson::Encoding
|
||||
\brief Concept for encoding of Unicode characters.
|
||||
|
||||
\code
|
||||
concept Encoding {
|
||||
typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition.
|
||||
|
||||
enum { supportUnicode = 1 }; // or 0 if not supporting unicode
|
||||
|
||||
//! \brief Encode a Unicode codepoint to an output stream.
|
||||
//! \param os Output stream.
|
||||
    //! \param codepoint A Unicode codepoint, ranging from 0x0 to 0x10FFFF inclusive.
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint);
|
||||
|
||||
//! \brief Decode a Unicode codepoint from an input stream.
|
||||
//! \param is Input stream.
|
||||
//! \param codepoint Output of the unicode codepoint.
|
||||
//! \return true if a valid codepoint can be decoded from the stream.
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint);
|
||||
|
||||
//! \brief Validate one Unicode codepoint from an encoded stream.
|
||||
//! \param is Input stream to obtain codepoint.
|
||||
//! \param os Output for copying one codepoint.
|
||||
//! \return true if it is valid.
|
||||
    //! \note This function just validates and copies the codepoint without actually decoding it.
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os);
|
||||
|
||||
    // The following functions deal with byte streams.
|
||||
|
||||
    //! Take a character from the input byte stream, skipping the BOM if it exists.
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is);
|
||||
|
||||
//! Take a character from input byte stream.
|
||||
template <typename InputByteStream>
|
||||
static Ch Take(InputByteStream& is);
|
||||
|
||||
//! Put BOM to output byte stream.
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os);
|
||||
|
||||
//! Put a character to output byte stream.
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, Ch c);
|
||||
};
|
||||
\endcode
|
||||
*/
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// UTF8
|
||||
|
||||
//! UTF-8 encoding.
|
||||
/*! http://en.wikipedia.org/wiki/UTF-8
|
||||
http://tools.ietf.org/html/rfc3629
|
||||
\tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char.
|
||||
\note implements Encoding concept
|
||||
*/
|
||||
template<typename CharType = char>
|
||||
struct UTF8 {
|
||||
typedef CharType Ch;
|
||||
|
||||
enum { supportUnicode = 1 };
|
||||
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint) {
|
||||
if (codepoint <= 0x7F)
|
||||
os.Put(static_cast<Ch>(codepoint & 0xFF));
|
||||
else if (codepoint <= 0x7FF) {
|
||||
os.Put(static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint & 0x3F))));
|
||||
}
|
||||
else if (codepoint <= 0xFFFF) {
|
||||
os.Put(static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
os.Put(static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
}
|
||||
|
||||
template<typename OutputStream>
|
||||
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
|
||||
if (codepoint <= 0x7F)
|
||||
PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
|
||||
else if (codepoint <= 0x7FF) {
|
||||
PutUnsafe(os, static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint & 0x3F))));
|
||||
}
|
||||
else if (codepoint <= 0xFFFF) {
|
||||
PutUnsafe(os, static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
PutUnsafe(os, static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint) {
|
||||
#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast<unsigned char>(c) & 0x3Fu)
|
||||
#define TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
|
||||
#define TAIL() COPY(); TRANS(0x70)
|
||||
typename InputStream::Ch c = is.Take();
|
||||
if (!(c & 0x80)) {
|
||||
*codepoint = static_cast<unsigned char>(c);
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char type = GetRange(static_cast<unsigned char>(c));
|
||||
if (type >= 32) {
|
||||
*codepoint = 0;
|
||||
} else {
|
||||
*codepoint = (0xFF >> type) & static_cast<unsigned char>(c);
|
||||
}
|
||||
bool result = true;
|
||||
switch (type) {
|
||||
case 2: TAIL(); return result;
|
||||
case 3: TAIL(); TAIL(); return result;
|
||||
case 4: COPY(); TRANS(0x50); TAIL(); return result;
|
||||
case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
|
||||
case 6: TAIL(); TAIL(); TAIL(); return result;
|
||||
case 10: COPY(); TRANS(0x20); TAIL(); return result;
|
||||
case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
|
||||
default: return false;
|
||||
}
|
||||
#undef COPY
|
||||
#undef TRANS
|
||||
#undef TAIL
|
||||
}
|
||||
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os) {
|
||||
#define COPY() os.Put(c = is.Take())
|
||||
#define TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
|
||||
#define TAIL() COPY(); TRANS(0x70)
|
||||
Ch c;
|
||||
COPY();
|
||||
if (!(c & 0x80))
|
||||
return true;
|
||||
|
||||
bool result = true;
|
||||
switch (GetRange(static_cast<unsigned char>(c))) {
|
||||
case 2: TAIL(); return result;
|
||||
case 3: TAIL(); TAIL(); return result;
|
||||
case 4: COPY(); TRANS(0x50); TAIL(); return result;
|
||||
case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
|
||||
case 6: TAIL(); TAIL(); TAIL(); return result;
|
||||
case 10: COPY(); TRANS(0x20); TAIL(); return result;
|
||||
case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
|
||||
default: return false;
|
||||
}
|
||||
#undef COPY
|
||||
#undef TRANS
|
||||
#undef TAIL
|
||||
}
|
||||
|
||||
static unsigned char GetRange(unsigned char c) {
|
||||
// Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
|
||||
// With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types.
|
||||
static const unsigned char type[] = {
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
|
||||
0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,
|
||||
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
|
||||
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
|
||||
8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
|
||||
10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,
|
||||
};
|
||||
return type[c];
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
typename InputByteStream::Ch c = Take(is);
|
||||
if (static_cast<unsigned char>(c) != 0xEFu) return c;
|
||||
c = is.Take();
|
||||
if (static_cast<unsigned char>(c) != 0xBBu) return c;
|
||||
c = is.Take();
|
||||
if (static_cast<unsigned char>(c) != 0xBFu) return c;
|
||||
c = is.Take();
|
||||
return c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static Ch Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
return static_cast<Ch>(is.Take());
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xEFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xBBu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xBFu));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, Ch c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(c));
|
||||
}
|
||||
};
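
// Editorial sketch, not part of the original header: decoding one multi-byte UTF-8
// sequence. The bytes E2 82 AC encode U+20AC (the euro sign); Decode() consumes them and
// reconstructs the code point, returning false on malformed input. A minimal local byte
// source is used so the example does not depend on other stream headers.
struct ExampleUTF8ByteSource {
    typedef char Ch;
    const char* p;
    explicit ExampleUTF8ByteSource(const char* s) : p(s) {}
    Ch Take() { return *p++; }
};

inline bool UTF8DecodeExample() {
    ExampleUTF8ByteSource bytes("\xE2\x82\xAC");
    unsigned codepoint = 0;
    bool ok = UTF8<>::Decode(bytes, &codepoint);   // consumes all three bytes
    return ok && codepoint == 0x20AC;              // true
}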
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// UTF16
|
||||
|
||||
//! UTF-16 encoding.
|
||||
/*! http://en.wikipedia.org/wiki/UTF-16
|
||||
http://tools.ietf.org/html/rfc2781
|
||||
\tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead.
|
||||
\note implements Encoding concept
|
||||
|
||||
    \note For in-memory access, there is no need to be concerned with endianness. The code units and code points are represented in the CPU's native endianness.
|
||||
For streaming, use UTF16LE and UTF16BE, which handle endianness.
|
||||
*/
|
||||
template<typename CharType = wchar_t>
|
||||
struct UTF16 {
|
||||
typedef CharType Ch;
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2);
|
||||
|
||||
enum { supportUnicode = 1 };
|
||||
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
|
||||
if (codepoint <= 0xFFFF) {
|
||||
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
|
||||
os.Put(static_cast<typename OutputStream::Ch>(codepoint));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
unsigned v = codepoint - 0x10000;
|
||||
os.Put(static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800));
|
||||
os.Put((v & 0x3FF) | 0xDC00);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<typename OutputStream>
|
||||
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
|
||||
if (codepoint <= 0xFFFF) {
|
||||
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
|
||||
PutUnsafe(os, static_cast<typename OutputStream::Ch>(codepoint));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
unsigned v = codepoint - 0x10000;
|
||||
PutUnsafe(os, static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800));
|
||||
PutUnsafe(os, (v & 0x3FF) | 0xDC00);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
|
||||
typename InputStream::Ch c = is.Take();
|
||||
if (c < 0xD800 || c > 0xDFFF) {
|
||||
*codepoint = static_cast<unsigned>(c);
|
||||
return true;
|
||||
}
|
||||
else if (c <= 0xDBFF) {
|
||||
*codepoint = (static_cast<unsigned>(c) & 0x3FF) << 10;
|
||||
c = is.Take();
|
||||
*codepoint |= (static_cast<unsigned>(c) & 0x3FF);
|
||||
*codepoint += 0x10000;
|
||||
return c >= 0xDC00 && c <= 0xDFFF;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
|
||||
typename InputStream::Ch c;
|
||||
os.Put(static_cast<typename OutputStream::Ch>(c = is.Take()));
|
||||
if (c < 0xD800 || c > 0xDFFF)
|
||||
return true;
|
||||
else if (c <= 0xDBFF) {
|
||||
os.Put(c = is.Take());
|
||||
return c >= 0xDC00 && c <= 0xDFFF;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
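
// Editorial sketch, not part of the original header: how UTF16::Encode splits a
// supplementary-plane code point into a surrogate pair. For U+1F600:
//   v    = 0x1F600 - 0x10000    = 0xF600
//   high = 0xD800 | (v >> 10)   = 0xD83D
//   low  = 0xDC00 | (v & 0x3FF) = 0xDE00
// so Encode() emits the code units 0xD83D, 0xDE00, and Decode() reverses the mapping.
struct ExampleUTF16Sink {
    typedef wchar_t Ch;
    unsigned units[2];
    int count;
    ExampleUTF16Sink() : count(0) { units[0] = units[1] = 0; }
    void Put(Ch c) { if (count < 2) units[count++] = static_cast<unsigned>(c) & 0xFFFFu; }
};

inline bool UTF16SurrogatePairExample() {
    ExampleUTF16Sink sink;
    UTF16<>::Encode(sink, 0x1F600);
    return sink.units[0] == 0xD83D && sink.units[1] == 0xDE00;   // true
}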
|
||||
|
||||
//! UTF-16 little endian encoding.
|
||||
template<typename CharType = wchar_t>
|
||||
struct UTF16LE : UTF16<CharType> {
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
CharType c = Take(is);
|
||||
return static_cast<uint16_t>(c) == 0xFEFFu ? Take(is) : c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
unsigned c = static_cast<uint8_t>(is.Take());
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
|
||||
return static_cast<CharType>(c);
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, CharType c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
|
||||
}
|
||||
};
|
||||
|
||||
//! UTF-16 big endian encoding.
|
||||
template<typename CharType = wchar_t>
|
||||
struct UTF16BE : UTF16<CharType> {
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
CharType c = Take(is);
|
||||
return static_cast<uint16_t>(c) == 0xFEFFu ? Take(is) : c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
|
||||
c |= static_cast<uint8_t>(is.Take());
|
||||
return static_cast<CharType>(c);
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, CharType c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
|
||||
}
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// UTF32
|
||||
|
||||
//! UTF-32 encoding.
|
||||
/*! http://en.wikipedia.org/wiki/UTF-32
|
||||
\tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead.
|
||||
\note implements Encoding concept
|
||||
|
||||
    \note For in-memory access, there is no need to be concerned with endianness. The code units and code points are represented in the CPU's native endianness.
|
||||
For streaming, use UTF32LE and UTF32BE, which handle endianness.
|
||||
*/
|
||||
template<typename CharType = unsigned>
|
||||
struct UTF32 {
|
||||
typedef CharType Ch;
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4);
|
||||
|
||||
enum { supportUnicode = 1 };
|
||||
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4);
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
os.Put(codepoint);
|
||||
}
|
||||
|
||||
template<typename OutputStream>
|
||||
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4);
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
PutUnsafe(os, codepoint);
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
|
||||
Ch c = is.Take();
|
||||
*codepoint = c;
|
||||
return c <= 0x10FFFF;
|
||||
}
|
||||
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
|
||||
Ch c;
|
||||
os.Put(c = is.Take());
|
||||
return c <= 0x10FFFF;
|
||||
}
|
||||
};
|
||||
|
||||
//! UTF-32 little endian encoding.
|
||||
template<typename CharType = unsigned>
|
||||
struct UTF32LE : UTF32<CharType> {
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
CharType c = Take(is);
|
||||
return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
unsigned c = static_cast<uint8_t>(is.Take());
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16;
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24;
|
||||
return static_cast<CharType>(c);
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, CharType c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu));
|
||||
}
|
||||
};
|
||||
|
||||
//! UTF-32 big endian encoding.
|
||||
template<typename CharType = unsigned>
struct UTF32BE : UTF32<CharType> {
    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        CharType c = Take(is);
        return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
    }

    template <typename InputByteStream>
    static CharType Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24;
        c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16;
        c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
        c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take()));
        return static_cast<CharType>(c);
    }

    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
        os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
        os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
    }

    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, CharType c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu));
        os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu));
    }
};

///////////////////////////////////////////////////////////////////////////////
// ASCII

//! ASCII encoding.
/*! http://en.wikipedia.org/wiki/ASCII
    \tparam CharType Code unit for storing 7-bit ASCII data. Default is char.
    \note implements Encoding concept
*/
template<typename CharType = char>
struct ASCII {
    typedef CharType Ch;

    enum { supportUnicode = 0 };

    template<typename OutputStream>
    static void Encode(OutputStream& os, unsigned codepoint) {
        RAPIDJSON_ASSERT(codepoint <= 0x7F);
        os.Put(static_cast<Ch>(codepoint & 0xFF));
    }

    template<typename OutputStream>
    static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
        RAPIDJSON_ASSERT(codepoint <= 0x7F);
        PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
    }

    template <typename InputStream>
    static bool Decode(InputStream& is, unsigned* codepoint) {
        uint8_t c = static_cast<uint8_t>(is.Take());
        *codepoint = c;
        return c <= 0x7F;
    }

    template <typename InputStream, typename OutputStream>
    static bool Validate(InputStream& is, OutputStream& os) {
        uint8_t c = static_cast<uint8_t>(is.Take());
        os.Put(static_cast<typename OutputStream::Ch>(c));
        return c <= 0x7F;
    }

    template <typename InputByteStream>
    static CharType TakeBOM(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        uint8_t c = static_cast<uint8_t>(Take(is));
        return static_cast<Ch>(c);
    }

    template <typename InputByteStream>
    static Ch Take(InputByteStream& is) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
        return static_cast<Ch>(is.Take());
    }

    template <typename OutputByteStream>
    static void PutBOM(OutputByteStream& os) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        (void)os;
    }

    template <typename OutputByteStream>
    static void Put(OutputByteStream& os, Ch c) {
        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
        os.Put(static_cast<typename OutputByteStream::Ch>(c));
    }
};

///////////////////////////////////////////////////////////////////////////////
// AutoUTF

//! Runtime-specified UTF encoding type of a stream.
enum UTFType {
    kUTF8 = 0,      //!< UTF-8.
    kUTF16LE = 1,   //!< UTF-16 little endian.
    kUTF16BE = 2,   //!< UTF-16 big endian.
    kUTF32LE = 3,   //!< UTF-32 little endian.
    kUTF32BE = 4    //!< UTF-32 big endian.
};

//! Dynamically select encoding according to stream's runtime-specified UTF encoding type.
/*! \note This class can be used with AutoUTFInputStream and AutoUTFOutputStream, which provide GetType().
*/
template<typename CharType>
struct AutoUTF {
    typedef CharType Ch;

    enum { supportUnicode = 1 };

#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x

    template<typename OutputStream>
    RAPIDJSON_FORCEINLINE static void Encode(OutputStream& os, unsigned codepoint) {
        typedef void (*EncodeFunc)(OutputStream&, unsigned);
        static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) };
        (*f[os.GetType()])(os, codepoint);
    }

    template<typename OutputStream>
    RAPIDJSON_FORCEINLINE static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
        typedef void (*EncodeFunc)(OutputStream&, unsigned);
        static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) };
        (*f[os.GetType()])(os, codepoint);
    }

    template <typename InputStream>
    RAPIDJSON_FORCEINLINE static bool Decode(InputStream& is, unsigned* codepoint) {
        typedef bool (*DecodeFunc)(InputStream&, unsigned*);
        static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) };
        return (*f[is.GetType()])(is, codepoint);
    }

    template <typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
        typedef bool (*ValidateFunc)(InputStream&, OutputStream&);
        static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) };
        return (*f[is.GetType()])(is, os);
    }

#undef RAPIDJSON_ENCODINGS_FUNC
};

///////////////////////////////////////////////////////////////////////////////
// Transcoder

//! Encoding conversion.
template<typename SourceEncoding, typename TargetEncoding>
struct Transcoder {
    //! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream.
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) {
        unsigned codepoint;
        if (!SourceEncoding::Decode(is, &codepoint))
            return false;
        TargetEncoding::Encode(os, codepoint);
        return true;
    }

    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) {
        unsigned codepoint;
        if (!SourceEncoding::Decode(is, &codepoint))
            return false;
        TargetEncoding::EncodeUnsafe(os, codepoint);
        return true;
    }

    //! Validate one Unicode codepoint from an encoded stream.
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
        return Transcode(is, os);   // Since source/target encoding is different, must transcode.
    }
};

// Forward declaration.
template<typename Stream>
inline void PutUnsafe(Stream& stream, typename Stream::Ch c);

//! Specialization of Transcoder with same source and target encoding.
template<typename Encoding>
struct Transcoder<Encoding, Encoding> {
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) {
        os.Put(is.Take());  // Just copy one code unit. This semantic is different from the primary template class.
        return true;
    }

    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) {
        PutUnsafe(os, is.Take());   // Just copy one code unit. This semantic is different from the primary template class.
        return true;
    }

    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
        return Encoding::Validate(is, os);  // source/target encoding are the same
    }
};

RAPIDJSON_NAMESPACE_END

#if defined(__GNUC__) || defined(_MSC_VER)
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_ENCODINGS_H_
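The Transcoder pair above is the piece most call sites use directly. A minimal usage sketch (not part of this commit; the include paths assume the headers are reachable as "rapidjson/...") that re-encodes a UTF-8 string as UTF-16 one codepoint at a time:

// Usage sketch only -- not part of the vendored headers above.
#include "rapidjson/encodings.h"
#include "rapidjson/stream.h"        // GenericStringStream
#include "rapidjson/stringbuffer.h"  // GenericStringBuffer

// Returns false if the input is not valid UTF-8.
inline bool Utf8ToUtf16(const char* utf8, rapidjson::GenericStringBuffer<rapidjson::UTF16<> >& out) {
    using namespace rapidjson;
    GenericStringStream<UTF8<> > in(utf8);
    while (in.Peek() != '\0')
        if (!Transcoder<UTF8<>, UTF16<> >::Transcode(in, out))
            return false;   // Decode() rejected a byte sequence
    return true;
}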
@@ -0,0 +1,74 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_ERROR_EN_H_
#define RAPIDJSON_ERROR_EN_H_

#include "error.h"

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(switch-enum)
RAPIDJSON_DIAG_OFF(covered-switch-default)
#endif

RAPIDJSON_NAMESPACE_BEGIN

//! Maps error code of parsing into error message.
/*!
    \ingroup RAPIDJSON_ERRORS
    \param parseErrorCode Error code obtained in parsing.
    \return the error message.
    \note User can make a copy of this function for localization.
        Using switch-case is safer for future modification of error codes.
*/
inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) {
    switch (parseErrorCode) {
        case kParseErrorNone: return RAPIDJSON_ERROR_STRING("No error.");

        case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty.");
        case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values.");

        case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value.");

        case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member.");
        case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member.");
        case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member.");

        case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element.");

        case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string.");
        case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid.");
        case kParseErrorStringEscapeInvalid: return RAPIDJSON_ERROR_STRING("Invalid escape character in string.");
        case kParseErrorStringMissQuotationMark: return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string.");
        case kParseErrorStringInvalidEncoding: return RAPIDJSON_ERROR_STRING("Invalid encoding in string.");

        case kParseErrorNumberTooBig: return RAPIDJSON_ERROR_STRING("Number too big to be stored in double.");
        case kParseErrorNumberMissFraction: return RAPIDJSON_ERROR_STRING("Miss fraction part in number.");
        case kParseErrorNumberMissExponent: return RAPIDJSON_ERROR_STRING("Miss exponent in number.");

        case kParseErrorTermination: return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error.");
        case kParseErrorUnspecificSyntaxError: return RAPIDJSON_ERROR_STRING("Unspecific syntax error.");

        default: return RAPIDJSON_ERROR_STRING("Unknown error.");
    }
}

RAPIDJSON_NAMESPACE_END

#ifdef __clang__
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_ERROR_EN_H_
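A usage sketch (not part of this commit) showing how GetParseError_En() is typically paired with a Document parse; the include paths are assumptions about where the vendored headers end up:

// Usage sketch only -- not part of the vendored headers above.
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/error/en.h"

// Parses `json` into `d` and prints a readable diagnostic on failure.
inline bool ParseOrReport(const char* json, rapidjson::Document& d) {
    if (d.Parse(json).HasParseError()) {
        std::fprintf(stderr, "JSON parse error at offset %u: %s\n",
                     static_cast<unsigned>(d.GetErrorOffset()),
                     rapidjson::GetParseError_En(d.GetParseError()));
        return false;
    }
    return true;
}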
@@ -0,0 +1,155 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_ERROR_ERROR_H_
#define RAPIDJSON_ERROR_ERROR_H_

#include "../rapidjson.h"

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(padded)
#endif

/*! \file error.h */

/*! \defgroup RAPIDJSON_ERRORS RapidJSON error handling */

///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ERROR_CHARTYPE

//! Character type of error messages.
/*! \ingroup RAPIDJSON_ERRORS
    The default character type is \c char.
    On Windows, user can define this macro as \c TCHAR for supporting both
    unicode/non-unicode settings.
*/
#ifndef RAPIDJSON_ERROR_CHARTYPE
#define RAPIDJSON_ERROR_CHARTYPE char
#endif

///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_ERROR_STRING

//! Macro for converting string literal to \ref RAPIDJSON_ERROR_CHARTYPE[].
/*! \ingroup RAPIDJSON_ERRORS
    By default this conversion macro does nothing.
    On Windows, user can define this macro as \c _T(x) for supporting both
    unicode/non-unicode settings.
*/
#ifndef RAPIDJSON_ERROR_STRING
#define RAPIDJSON_ERROR_STRING(x) x
#endif

RAPIDJSON_NAMESPACE_BEGIN

///////////////////////////////////////////////////////////////////////////////
// ParseErrorCode

//! Error code of parsing.
/*! \ingroup RAPIDJSON_ERRORS
    \see GenericReader::Parse, GenericReader::GetParseErrorCode
*/
enum ParseErrorCode {
    kParseErrorNone = 0,                        //!< No error.

    kParseErrorDocumentEmpty,                   //!< The document is empty.
    kParseErrorDocumentRootNotSingular,         //!< The document root must not be followed by other values.

    kParseErrorValueInvalid,                    //!< Invalid value.

    kParseErrorObjectMissName,                  //!< Missing a name for object member.
    kParseErrorObjectMissColon,                 //!< Missing a colon after a name of object member.
    kParseErrorObjectMissCommaOrCurlyBracket,   //!< Missing a comma or '}' after an object member.

    kParseErrorArrayMissCommaOrSquareBracket,   //!< Missing a comma or ']' after an array element.

    kParseErrorStringUnicodeEscapeInvalidHex,   //!< Incorrect hex digit after \\u escape in string.
    kParseErrorStringUnicodeSurrogateInvalid,   //!< The surrogate pair in string is invalid.
    kParseErrorStringEscapeInvalid,             //!< Invalid escape character in string.
    kParseErrorStringMissQuotationMark,         //!< Missing a closing quotation mark in string.
    kParseErrorStringInvalidEncoding,           //!< Invalid encoding in string.

    kParseErrorNumberTooBig,                    //!< Number too big to be stored in double.
    kParseErrorNumberMissFraction,              //!< Missing fraction part in number.
    kParseErrorNumberMissExponent,              //!< Missing exponent in number.

    kParseErrorTermination,                     //!< Parsing was terminated.
    kParseErrorUnspecificSyntaxError            //!< Unspecific syntax error.
};

//! Result of parsing (wraps ParseErrorCode)
/*!
    \ingroup RAPIDJSON_ERRORS
    \code
        Document doc;
        ParseResult ok = doc.Parse("[42]");
        if (!ok) {
            fprintf(stderr, "JSON parse error: %s (%u)",
                    GetParseError_En(ok.Code()), ok.Offset());
            exit(EXIT_FAILURE);
        }
    \endcode
    \see GenericReader::Parse, GenericDocument::Parse
*/
struct ParseResult {
public:
    //! Default constructor, no error.
    ParseResult() : code_(kParseErrorNone), offset_(0) {}
    //! Constructor to set an error.
    ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {}

    //! Get the error code.
    ParseErrorCode Code() const { return code_; }
    //! Get the error offset, if \ref IsError(), 0 otherwise.
    size_t Offset() const { return offset_; }

    //! Conversion to \c bool, returns \c true, iff !\ref IsError().
    operator bool() const { return !IsError(); }
    //! Whether the result is an error.
    bool IsError() const { return code_ != kParseErrorNone; }

    bool operator==(const ParseResult& that) const { return code_ == that.code_; }
    bool operator==(ParseErrorCode code) const { return code_ == code; }
    friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; }

    //! Reset error code.
    void Clear() { Set(kParseErrorNone); }
    //! Update error code and offset.
    void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; }

private:
    ParseErrorCode code_;
    size_t offset_;
};

//! Function pointer type of GetParseError().
/*! \ingroup RAPIDJSON_ERRORS

    This is the prototype for \c GetParseError_X(), where \c X is a locale.
    User can dynamically change locale in runtime, e.g.:
    \code
        GetParseErrorFunc GetParseError = GetParseError_En; // or whatever
        const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode());
    \endcode
*/
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode);

RAPIDJSON_NAMESPACE_END

#ifdef __clang__
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_ERROR_ERROR_H_
@@ -0,0 +1,99 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_FILEREADSTREAM_H_
#define RAPIDJSON_FILEREADSTREAM_H_

#include "stream.h"
#include <cstdio>

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(padded)
RAPIDJSON_DIAG_OFF(unreachable-code)
RAPIDJSON_DIAG_OFF(missing-noreturn)
#endif

RAPIDJSON_NAMESPACE_BEGIN

//! File byte stream for input using fread().
/*!
    \note implements Stream concept
*/
class FileReadStream {
public:
    typedef char Ch;    //!< Character type (byte).

    //! Constructor.
    /*!
        \param fp File pointer opened for read.
        \param buffer user-supplied buffer.
        \param bufferSize size of buffer in bytes. Must be >= 4 bytes.
    */
    FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
        RAPIDJSON_ASSERT(fp_ != 0);
        RAPIDJSON_ASSERT(bufferSize >= 4);
        Read();
    }

    Ch Peek() const { return *current_; }
    Ch Take() { Ch c = *current_; Read(); return c; }
    size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }

    // Not implemented
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    // For encoding detection only.
    const Ch* Peek4() const {
        return (current_ + 4 <= bufferLast_) ? current_ : 0;
    }

private:
    void Read() {
        if (current_ < bufferLast_)
            ++current_;
        else if (!eof_) {
            count_ += readCount_;
            readCount_ = fread(buffer_, 1, bufferSize_, fp_);
            bufferLast_ = buffer_ + readCount_ - 1;
            current_ = buffer_;

            if (readCount_ < bufferSize_) {
                buffer_[readCount_] = '\0';
                ++bufferLast_;
                eof_ = true;
            }
        }
    }

    std::FILE* fp_;
    Ch *buffer_;
    size_t bufferSize_;
    Ch *bufferLast_;
    Ch *current_;
    size_t readCount_;
    size_t count_;  //!< Number of characters read
    bool eof_;
};

RAPIDJSON_NAMESPACE_END

#ifdef __clang__
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_FILEREADSTREAM_H_
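A usage sketch (not part of this commit) for the class above: FileReadStream never allocates, so the caller owns both the FILE* and the buffer. Include paths are assumptions.

// Usage sketch only -- not part of the vendored headers above.
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/filereadstream.h"

// Reads a whole JSON file through a fixed 64 KiB buffer.
inline bool LoadJsonFile(const char* path, rapidjson::Document& d) {
    std::FILE* fp = std::fopen(path, "rb");  // "rb": avoid newline translation on Windows
    if (!fp)
        return false;
    char buffer[65536];
    rapidjson::FileReadStream is(fp, buffer, sizeof(buffer));
    d.ParseStream(is);
    std::fclose(fp);
    return !d.HasParseError();
}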
@@ -0,0 +1,104 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_FILEWRITESTREAM_H_
#define RAPIDJSON_FILEWRITESTREAM_H_

#include "stream.h"
#include <cstdio>

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(unreachable-code)
#endif

RAPIDJSON_NAMESPACE_BEGIN

//! Wrapper of C file stream for output using fwrite().
/*!
    \note implements Stream concept
*/
class FileWriteStream {
public:
    typedef char Ch;    //!< Character type. Only support char.

    FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) {
        RAPIDJSON_ASSERT(fp_ != 0);
    }

    void Put(char c) {
        if (current_ >= bufferEnd_)
            Flush();

        *current_++ = c;
    }

    void PutN(char c, size_t n) {
        size_t avail = static_cast<size_t>(bufferEnd_ - current_);
        while (n > avail) {
            std::memset(current_, c, avail);
            current_ += avail;
            Flush();
            n -= avail;
            avail = static_cast<size_t>(bufferEnd_ - current_);
        }

        if (n > 0) {
            std::memset(current_, c, n);
            current_ += n;
        }
    }

    void Flush() {
        if (current_ != buffer_) {
            size_t result = fwrite(buffer_, 1, static_cast<size_t>(current_ - buffer_), fp_);
            if (result < static_cast<size_t>(current_ - buffer_)) {
                // failure deliberately ignored at this time
                // added to avoid warn_unused_result build errors
            }
            current_ = buffer_;
        }
    }

    // Not implemented
    char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
    char Take() { RAPIDJSON_ASSERT(false); return 0; }
    size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
    char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }

private:
    // Prohibit copy constructor & assignment operator.
    FileWriteStream(const FileWriteStream&);
    FileWriteStream& operator=(const FileWriteStream&);

    std::FILE* fp_;
    char *buffer_;
    char *bufferEnd_;
    char *current_;
};

//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(FileWriteStream& stream, char c, size_t n) {
    stream.PutN(c, n);
}

RAPIDJSON_NAMESPACE_END

#ifdef __clang__
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_FILEWRITESTREAM_H_
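A usage sketch (not part of this commit) for the output side, pairing FileWriteStream with a Writer; the explicit Flush() just makes sure buffered bytes reach the file before it is closed. Include paths are assumptions.

// Usage sketch only -- not part of the vendored headers above.
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/filewritestream.h"
#include "rapidjson/writer.h"

// Serializes `d` to `path` through a fixed 64 KiB buffer.
inline bool SaveJsonFile(const char* path, const rapidjson::Document& d) {
    std::FILE* fp = std::fopen(path, "wb");
    if (!fp)
        return false;
    char buffer[65536];
    rapidjson::FileWriteStream os(fp, buffer, sizeof(buffer));
    rapidjson::Writer<rapidjson::FileWriteStream> writer(os);
    bool ok = d.Accept(writer);
    os.Flush();          // push any bytes still sitting in the buffer to the file
    std::fclose(fp);
    return ok;
}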
@@ -0,0 +1,151 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_FWD_H_
#define RAPIDJSON_FWD_H_

#include "rapidjson.h"

RAPIDJSON_NAMESPACE_BEGIN

// encodings.h

template<typename CharType> struct UTF8;
template<typename CharType> struct UTF16;
template<typename CharType> struct UTF16BE;
template<typename CharType> struct UTF16LE;
template<typename CharType> struct UTF32;
template<typename CharType> struct UTF32BE;
template<typename CharType> struct UTF32LE;
template<typename CharType> struct ASCII;
template<typename CharType> struct AutoUTF;

template<typename SourceEncoding, typename TargetEncoding>
struct Transcoder;

// allocators.h

class CrtAllocator;

template <typename BaseAllocator>
class MemoryPoolAllocator;

// stream.h

template <typename Encoding>
struct GenericStringStream;

typedef GenericStringStream<UTF8<char> > StringStream;

template <typename Encoding>
struct GenericInsituStringStream;

typedef GenericInsituStringStream<UTF8<char> > InsituStringStream;

// stringbuffer.h

template <typename Encoding, typename Allocator>
class GenericStringBuffer;

typedef GenericStringBuffer<UTF8<char>, CrtAllocator> StringBuffer;

// filereadstream.h

class FileReadStream;

// filewritestream.h

class FileWriteStream;

// memorybuffer.h

template <typename Allocator>
struct GenericMemoryBuffer;

typedef GenericMemoryBuffer<CrtAllocator> MemoryBuffer;

// memorystream.h

struct MemoryStream;

// reader.h

template<typename Encoding, typename Derived>
struct BaseReaderHandler;

template <typename SourceEncoding, typename TargetEncoding, typename StackAllocator>
class GenericReader;

typedef GenericReader<UTF8<char>, UTF8<char>, CrtAllocator> Reader;

// writer.h

template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
class Writer;

// prettywriter.h

template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
class PrettyWriter;

// document.h

template <typename Encoding, typename Allocator>
struct GenericMember;

template <bool Const, typename Encoding, typename Allocator>
class GenericMemberIterator;

template<typename CharType>
struct GenericStringRef;

template <typename Encoding, typename Allocator>
class GenericValue;

typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value;

template <typename Encoding, typename Allocator, typename StackAllocator>
class GenericDocument;

typedef GenericDocument<UTF8<char>, MemoryPoolAllocator<CrtAllocator>, CrtAllocator> Document;

// pointer.h

template <typename ValueType, typename Allocator>
class GenericPointer;

typedef GenericPointer<Value, CrtAllocator> Pointer;

// schema.h

template <typename SchemaDocumentType>
class IGenericRemoteSchemaDocumentProvider;

template <typename ValueT, typename Allocator>
class GenericSchemaDocument;

typedef GenericSchemaDocument<Value, CrtAllocator> SchemaDocument;
typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider;

template <
    typename SchemaDocumentType,
    typename OutputHandler,
    typename StateAllocator>
class GenericSchemaValidator;

typedef GenericSchemaValidator<SchemaDocument, BaseReaderHandler<UTF8<char>, void>, CrtAllocator> SchemaValidator;

RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_FWD_H_
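fwd.h exists so that other headers can mention RapidJSON types without paying for the full includes. A hedged sketch of the intended pattern (not part of this commit; file and type names are illustrative):

// Usage sketch only -- not part of the vendored headers above.
// some_config.h (illustrative): declare against forward declarations only.
#include "rapidjson/fwd.h"

struct SomeConfig {
    // A reference to the incomplete type is enough here; the .cpp that
    // defines Load() includes rapidjson/document.h for the complete type.
    bool Load(const rapidjson::Document& doc);
    int port;
};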
@@ -0,0 +1,290 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_BIGINTEGER_H_
#define RAPIDJSON_BIGINTEGER_H_

#include "../rapidjson.h"

#if defined(_MSC_VER) && defined(_M_AMD64)
#include <intrin.h> // for _umul128
#pragma intrinsic(_umul128)
#endif

RAPIDJSON_NAMESPACE_BEGIN
namespace internal {

class BigInteger {
public:
    typedef uint64_t Type;

    BigInteger(const BigInteger& rhs) : count_(rhs.count_) {
        std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
    }

    explicit BigInteger(uint64_t u) : count_(1) {
        digits_[0] = u;
    }

    BigInteger(const char* decimals, size_t length) : count_(1) {
        RAPIDJSON_ASSERT(length > 0);
        digits_[0] = 0;
        size_t i = 0;
        const size_t kMaxDigitPerIteration = 19;  // 2^64 = 18446744073709551616 > 10^19
        while (length >= kMaxDigitPerIteration) {
            AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration);
            length -= kMaxDigitPerIteration;
            i += kMaxDigitPerIteration;
        }

        if (length > 0)
            AppendDecimal64(decimals + i, decimals + i + length);
    }

    BigInteger& operator=(const BigInteger &rhs)
    {
        if (this != &rhs) {
            count_ = rhs.count_;
            std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
        }
        return *this;
    }

    BigInteger& operator=(uint64_t u) {
        digits_[0] = u;
        count_ = 1;
        return *this;
    }

    BigInteger& operator+=(uint64_t u) {
        Type backup = digits_[0];
        digits_[0] += u;
        for (size_t i = 0; i < count_ - 1; i++) {
            if (digits_[i] >= backup)
                return *this; // no carry
            backup = digits_[i + 1];
            digits_[i + 1] += 1;
        }

        // Last carry
        if (digits_[count_ - 1] < backup)
            PushBack(1);

        return *this;
    }

    BigInteger& operator*=(uint64_t u) {
        if (u == 0) return *this = 0;
        if (u == 1) return *this;
        if (*this == 1) return *this = u;

        uint64_t k = 0;
        for (size_t i = 0; i < count_; i++) {
            uint64_t hi;
            digits_[i] = MulAdd64(digits_[i], u, k, &hi);
            k = hi;
        }

        if (k > 0)
            PushBack(k);

        return *this;
    }

    BigInteger& operator*=(uint32_t u) {
        if (u == 0) return *this = 0;
        if (u == 1) return *this;
        if (*this == 1) return *this = u;

        uint64_t k = 0;
        for (size_t i = 0; i < count_; i++) {
            const uint64_t c = digits_[i] >> 32;
            const uint64_t d = digits_[i] & 0xFFFFFFFF;
            const uint64_t uc = u * c;
            const uint64_t ud = u * d;
            const uint64_t p0 = ud + k;
            const uint64_t p1 = uc + (p0 >> 32);
            digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32);
            k = p1 >> 32;
        }

        if (k > 0)
            PushBack(k);

        return *this;
    }

    BigInteger& operator<<=(size_t shift) {
        if (IsZero() || shift == 0) return *this;

        size_t offset = shift / kTypeBit;
        size_t interShift = shift % kTypeBit;
        RAPIDJSON_ASSERT(count_ + offset <= kCapacity);

        if (interShift == 0) {
            std::memmove(&digits_[count_ - 1 + offset], &digits_[count_ - 1], count_ * sizeof(Type));
            count_ += offset;
        }
        else {
            digits_[count_] = 0;
            for (size_t i = count_; i > 0; i--)
                digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift));
            digits_[offset] = digits_[0] << interShift;
            count_ += offset;
            if (digits_[count_])
                count_++;
        }

        std::memset(digits_, 0, offset * sizeof(Type));

        return *this;
    }

    bool operator==(const BigInteger& rhs) const {
        return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0;
    }

    bool operator==(const Type rhs) const {
        return count_ == 1 && digits_[0] == rhs;
    }

    BigInteger& MultiplyPow5(unsigned exp) {
        static const uint32_t kPow5[12] = {
            5,
            5 * 5,
            5 * 5 * 5,
            5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
            5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5
        };
        if (exp == 0) return *this;
        for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0x6765C793, 0xFA10079D); // 5^27
        for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13
        if (exp > 0) *this *= kPow5[exp - 1];
        return *this;
    }

    // Compute absolute difference of this and rhs.
    // Assume this != rhs
    bool Difference(const BigInteger& rhs, BigInteger* out) const {
        int cmp = Compare(rhs);
        RAPIDJSON_ASSERT(cmp != 0);
        const BigInteger *a, *b;  // Makes a > b
        bool ret;
        if (cmp < 0) { a = &rhs; b = this; ret = true; }
        else         { a = this; b = &rhs; ret = false; }

        Type borrow = 0;
        for (size_t i = 0; i < a->count_; i++) {
            Type d = a->digits_[i] - borrow;
            if (i < b->count_)
                d -= b->digits_[i];
            borrow = (d > a->digits_[i]) ? 1 : 0;
            out->digits_[i] = d;
            if (d != 0)
                out->count_ = i + 1;
        }

        return ret;
    }

    int Compare(const BigInteger& rhs) const {
        if (count_ != rhs.count_)
            return count_ < rhs.count_ ? -1 : 1;

        for (size_t i = count_; i-- > 0;)
            if (digits_[i] != rhs.digits_[i])
                return digits_[i] < rhs.digits_[i] ? -1 : 1;

        return 0;
    }

    size_t GetCount() const { return count_; }
    Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; }
    bool IsZero() const { return count_ == 1 && digits_[0] == 0; }

private:
    void AppendDecimal64(const char* begin, const char* end) {
        uint64_t u = ParseUint64(begin, end);
        if (IsZero())
            *this = u;
        else {
            unsigned exp = static_cast<unsigned>(end - begin);
            (MultiplyPow5(exp) <<= exp) += u;   // *this = *this * 10^exp + u
        }
    }

    void PushBack(Type digit) {
        RAPIDJSON_ASSERT(count_ < kCapacity);
        digits_[count_++] = digit;
    }

    static uint64_t ParseUint64(const char* begin, const char* end) {
        uint64_t r = 0;
        for (const char* p = begin; p != end; ++p) {
            RAPIDJSON_ASSERT(*p >= '0' && *p <= '9');
            r = r * 10u + static_cast<unsigned>(*p - '0');
        }
        return r;
    }

    // Assume a * b + k < 2^128
    static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) {
#if defined(_MSC_VER) && defined(_M_AMD64)
        uint64_t low = _umul128(a, b, outHigh) + k;
        if (low < k)
            (*outHigh)++;
        return low;
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
        __extension__ typedef unsigned __int128 uint128;
        uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b);
        p += k;
        *outHigh = static_cast<uint64_t>(p >> 64);
        return static_cast<uint64_t>(p);
#else
        const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32;
        uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1;
        x1 += (x0 >> 32); // can't give carry
        x1 += x2;
        if (x1 < x2)
            x3 += (static_cast<uint64_t>(1) << 32);
        uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF);
        uint64_t hi = x3 + (x1 >> 32);

        lo += k;
        if (lo < k)
            hi++;
        *outHigh = hi;
        return lo;
#endif
    }

    static const size_t kBitCount = 3328;  // 64bit * 54 > 10^1000
    static const size_t kCapacity = kBitCount / sizeof(Type);
    static const size_t kTypeBit = sizeof(Type) * 8;

    Type digits_[kCapacity];
    size_t count_;
};

} // namespace internal
RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_BIGINTEGER_H_
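BigInteger is internal machinery for exact number parsing; the constructor consumes the decimal string 19 digits at a time, and operator<<= moves whole 64-bit digits plus a bit-level remainder. A small sketch (not part of this commit) exercising both paths:

// Usage sketch only -- not part of the vendored headers above.
#include <cassert>
#include "rapidjson/internal/biginteger.h"

inline void BigIntegerDemo() {
    using rapidjson::internal::BigInteger;
    const char digits[] = "18446744073709551616";   // 2^64, one past what uint64_t can hold
    BigInteger fromDecimal(digits, sizeof(digits) - 1);
    BigInteger fromShift(1u);
    fromShift <<= 64;                               // 1 * 2^64
    assert(fromDecimal.Compare(fromShift) == 0);
    assert(fromDecimal.GetCount() == 2);            // needs two 64-bit digits
}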
@@ -0,0 +1,258 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
// integers." ACM Sigplan Notices 45.6 (2010): 233-243.

#ifndef RAPIDJSON_DIYFP_H_
#define RAPIDJSON_DIYFP_H_

#include "../rapidjson.h"

#if defined(_MSC_VER) && defined(_M_AMD64)
#include <intrin.h>
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_umul128)
#endif

RAPIDJSON_NAMESPACE_BEGIN
namespace internal {

#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++)
#endif

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(padded)
#endif

struct DiyFp {
    DiyFp() : f(), e() {}

    DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {}

    explicit DiyFp(double d) {
        union {
            double d;
            uint64_t u64;
        } u = { d };

        int biased_e = static_cast<int>((u.u64 & kDpExponentMask) >> kDpSignificandSize);
        uint64_t significand = (u.u64 & kDpSignificandMask);
        if (biased_e != 0) {
            f = significand + kDpHiddenBit;
            e = biased_e - kDpExponentBias;
        }
        else {
            f = significand;
            e = kDpMinExponent + 1;
        }
    }

    DiyFp operator-(const DiyFp& rhs) const {
        return DiyFp(f - rhs.f, e);
    }

    DiyFp operator*(const DiyFp& rhs) const {
#if defined(_MSC_VER) && defined(_M_AMD64)
        uint64_t h;
        uint64_t l = _umul128(f, rhs.f, &h);
        if (l & (uint64_t(1) << 63)) // rounding
            h++;
        return DiyFp(h, e + rhs.e + 64);
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
        __extension__ typedef unsigned __int128 uint128;
        uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f);
        uint64_t h = static_cast<uint64_t>(p >> 64);
        uint64_t l = static_cast<uint64_t>(p);
        if (l & (uint64_t(1) << 63)) // rounding
            h++;
        return DiyFp(h, e + rhs.e + 64);
#else
        const uint64_t M32 = 0xFFFFFFFF;
        const uint64_t a = f >> 32;
        const uint64_t b = f & M32;
        const uint64_t c = rhs.f >> 32;
        const uint64_t d = rhs.f & M32;
        const uint64_t ac = a * c;
        const uint64_t bc = b * c;
        const uint64_t ad = a * d;
        const uint64_t bd = b * d;
        uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32);
        tmp += 1U << 31;  /// mult_round
        return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64);
#endif
    }

    DiyFp Normalize() const {
#if defined(_MSC_VER) && defined(_M_AMD64)
        unsigned long index;
        _BitScanReverse64(&index, f);
        return DiyFp(f << (63 - index), e - (63 - index));
#elif defined(__GNUC__) && __GNUC__ >= 4
        int s = __builtin_clzll(f);
        return DiyFp(f << s, e - s);
#else
        DiyFp res = *this;
        while (!(res.f & (static_cast<uint64_t>(1) << 63))) {
            res.f <<= 1;
            res.e--;
        }
        return res;
#endif
    }

    DiyFp NormalizeBoundary() const {
        DiyFp res = *this;
        while (!(res.f & (kDpHiddenBit << 1))) {
            res.f <<= 1;
            res.e--;
        }
        res.f <<= (kDiySignificandSize - kDpSignificandSize - 2);
        res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2);
        return res;
    }

    void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const {
        DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary();
        DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1);
        mi.f <<= mi.e - pl.e;
        mi.e = pl.e;
        *plus = pl;
        *minus = mi;
    }

    double ToDouble() const {
        union {
            double d;
            uint64_t u64;
        } u;
        const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 :
            static_cast<uint64_t>(e + kDpExponentBias);
        u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize);
        return u.d;
    }

    static const int kDiySignificandSize = 64;
    static const int kDpSignificandSize = 52;
    static const int kDpExponentBias = 0x3FF + kDpSignificandSize;
    static const int kDpMaxExponent = 0x7FF - kDpExponentBias;
    static const int kDpMinExponent = -kDpExponentBias;
    static const int kDpDenormalExponent = -kDpExponentBias + 1;
    static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
    static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
    static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);

    uint64_t f;
    int e;
};

inline DiyFp GetCachedPowerByIndex(size_t index) {
    // 10^-348, 10^-340, ..., 10^340
    static const uint64_t kCachedPowers_F[] = {
        RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76),
        RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea),
        RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df),
        RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f),
        RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c),
        RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5),
        RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d),
        RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637),
        RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7),
        RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5),
        RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b),
        RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996),
        RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6),
        RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8),
        RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053),
        RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd),
        RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94),
        RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b),
        RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac),
        RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3),
        RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb),
        RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c),
        RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000),
        RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984),
        RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70),
        RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245),
        RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8),
        RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a),
        RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea),
        RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85),
        RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2),
        RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3),
        RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25),
        RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece),
        RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5),
        RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a),
        RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c),
        RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a),
        RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129),
        RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429),
        RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d),
        RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841),
        RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9),
        RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b)
    };
    static const int16_t kCachedPowers_E[] = {
        -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980,
        -954, -927, -901, -874, -847, -821, -794, -768, -741, -715,
        -688, -661, -635, -608, -582, -555, -529, -502, -475, -449,
        -422, -396, -369, -343, -316, -289, -263, -236, -210, -183,
        -157, -130, -103, -77, -50, -24, 3, 30, 56, 83,
        109, 136, 162, 189, 216, 242, 269, 295, 322, 348,
        375, 402, 428, 455, 481, 508, 534, 561, 588, 614,
        641, 667, 694, 720, 747, 774, 800, 827, 853, 880,
        907, 933, 960, 986, 1013, 1039, 1066
    };
    return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]);
}

inline DiyFp GetCachedPower(int e, int* K) {

    //int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
    double dk = (-61 - e) * 0.30102999566398114 + 347;  // dk must be positive, so can do ceiling in positive
    int k = static_cast<int>(dk);
    if (dk - k > 0.0)
        k++;

    unsigned index = static_cast<unsigned>((k >> 3) + 1);
    *K = -(-348 + static_cast<int>(index << 3));    // decimal exponent no need lookup table

    return GetCachedPowerByIndex(index);
}

inline DiyFp GetCachedPower10(int exp, int *outExp) {
    unsigned index = (static_cast<unsigned>(exp) + 348u) / 8u;
    *outExp = -348 + static_cast<int>(index) * 8;
    return GetCachedPowerByIndex(index);
}

#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif

#ifdef __clang__
RAPIDJSON_DIAG_POP
RAPIDJSON_DIAG_OFF(padded)
#endif

} // namespace internal
RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_DIYFP_H_
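DiyFp is the 64-bit significand/exponent form that Grisu2 operates on: a double decomposes into f * 2^e, and Normalize() shifts f left until its top bit is set so later multiplications keep full precision. A small sketch (not part of this commit):

// Usage sketch only -- not part of the vendored headers above.
#include <cassert>
#include "rapidjson/internal/diyfp.h"

inline void DiyFpDemo() {
    using rapidjson::internal::DiyFp;
    DiyFp v(0.5);                 // f = 2^52 (hidden bit only), e = -53
    assert(v.ToDouble() == 0.5);  // the decomposition round-trips
    DiyFp n = v.Normalize();      // shift left until the top bit of f is set
    assert((n.f >> 63) == 1u);    // same value: f grew, e shrank by the same shift
}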
@ -0,0 +1,245 @@
|
|||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
|
||||
// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
|
||||
// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
|
||||
|
||||
#ifndef RAPIDJSON_DTOA_
|
||||
#define RAPIDJSON_DTOA_
|
||||
|
||||
#include "itoa.h" // GetDigitsLut()
|
||||
#include "diyfp.h"
|
||||
#include "ieee754.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124
|
||||
#endif
|
||||
|
||||
inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) {
|
||||
while (rest < wp_w && delta - rest >= ten_kappa &&
|
||||
(rest + ten_kappa < wp_w || /// closer
|
||||
wp_w - rest > rest + ten_kappa - wp_w)) {
|
||||
buffer[len - 1]--;
|
||||
rest += ten_kappa;
|
||||
}
|
||||
}
|
||||
|
||||
inline unsigned CountDecimalDigit32(uint32_t n) {
|
||||
// Simple pure C++ implementation was faster than __builtin_clz version in this situation.
|
||||
if (n < 10) return 1;
|
||||
if (n < 100) return 2;
|
||||
if (n < 1000) return 3;
|
||||
if (n < 10000) return 4;
|
||||
if (n < 100000) return 5;
|
||||
if (n < 1000000) return 6;
|
||||
if (n < 10000000) return 7;
|
||||
if (n < 100000000) return 8;
|
||||
// Will not reach 10 digits in DigitGen()
|
||||
//if (n < 1000000000) return 9;
|
||||
//return 10;
|
||||
return 9;
|
||||
}
|
||||
|
||||
inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
|
||||
static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
|
||||
const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);
|
||||
const DiyFp wp_w = Mp - W;
|
||||
uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);
|
||||
uint64_t p2 = Mp.f & (one.f - 1);
|
||||
unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9]
|
||||
*len = 0;
|
||||
|
||||
while (kappa > 0) {
|
||||
uint32_t d = 0;
|
||||
switch (kappa) {
|
||||
case 9: d = p1 / 100000000; p1 %= 100000000; break;
|
||||
case 8: d = p1 / 10000000; p1 %= 10000000; break;
|
||||
case 7: d = p1 / 1000000; p1 %= 1000000; break;
|
||||
case 6: d = p1 / 100000; p1 %= 100000; break;
|
||||
case 5: d = p1 / 10000; p1 %= 10000; break;
|
||||
case 4: d = p1 / 1000; p1 %= 1000; break;
|
||||
case 3: d = p1 / 100; p1 %= 100; break;
|
||||
case 2: d = p1 / 10; p1 %= 10; break;
|
||||
case 1: d = p1; p1 = 0; break;
|
||||
default:;
|
||||
}
|
||||
if (d || *len)
|
||||
buffer[(*len)++] = static_cast<char>('0' + static_cast<char>(d));
|
||||
kappa--;
|
||||
uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
|
||||
if (tmp <= delta) {
|
||||
*K += kappa;
|
||||
GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// kappa = 0
|
||||
for (;;) {
|
||||
p2 *= 10;
|
||||
delta *= 10;
|
||||
char d = static_cast<char>(p2 >> -one.e);
|
||||
if (d || *len)
|
||||
buffer[(*len)++] = static_cast<char>('0' + d);
|
||||
p2 &= one.f - 1;
|
||||
kappa--;
|
||||
if (p2 < delta) {
|
||||
*K += kappa;
|
||||
int index = -static_cast<int>(kappa);
|
||||
GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[-static_cast<int>(kappa)] : 0));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void Grisu2(double value, char* buffer, int* length, int* K) {
|
||||
const DiyFp v(value);
|
||||
DiyFp w_m, w_p;
|
||||
v.NormalizedBoundaries(&w_m, &w_p);
|
||||
|
||||
const DiyFp c_mk = GetCachedPower(w_p.e, K);
|
||||
const DiyFp W = v.Normalize() * c_mk;
|
||||
DiyFp Wp = w_p * c_mk;
|
||||
DiyFp Wm = w_m * c_mk;
|
||||
Wm.f++;
|
||||
Wp.f--;
|
||||
DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K);
|
||||
}
|
||||
|
||||
inline char* WriteExponent(int K, char* buffer) {
|
||||
if (K < 0) {
|
||||
*buffer++ = '-';
|
||||
K = -K;
|
||||
}
|
||||
|
||||
if (K >= 100) {
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(K / 100));
|
||||
K %= 100;
|
||||
const char* d = GetDigitsLut() + K * 2;
|
||||
*buffer++ = d[0];
|
||||
*buffer++ = d[1];
|
||||
}
|
||||
else if (K >= 10) {
|
||||
const char* d = GetDigitsLut() + K * 2;
|
||||
*buffer++ = d[0];
|
||||
*buffer++ = d[1];
|
||||
}
|
||||
else
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(K));
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) {
|
||||
const int kk = length + k; // 10^(kk-1) <= v < 10^kk
|
||||
|
||||
if (0 <= k && kk <= 21) {
|
||||
// 1234e7 -> 12340000000
|
||||
for (int i = length; i < kk; i++)
|
||||
buffer[i] = '0';
|
||||
buffer[kk] = '.';
|
||||
buffer[kk + 1] = '0';
|
||||
return &buffer[kk + 2];
|
||||
}
|
||||
else if (0 < kk && kk <= 21) {
|
||||
// 1234e-2 -> 12.34
|
||||
std::memmove(&buffer[kk + 1], &buffer[kk], static_cast<size_t>(length - kk));
|
||||
buffer[kk] = '.';
|
||||
if (0 > k + maxDecimalPlaces) {
|
||||
// When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1
|
||||
// Remove extra trailing zeros (at least one) after truncation.
|
||||
for (int i = kk + maxDecimalPlaces; i > kk + 1; i--)
|
||||
if (buffer[i] != '0')
|
||||
return &buffer[i + 1];
|
||||
return &buffer[kk + 2]; // Reserve one zero
|
||||
}
|
||||
else
|
||||
return &buffer[length + 1];
|
||||
}
|
||||
else if (-6 < kk && kk <= 0) {
|
||||
// 1234e-6 -> 0.001234
|
||||
const int offset = 2 - kk;
|
||||
std::memmove(&buffer[offset], &buffer[0], static_cast<size_t>(length));
|
||||
buffer[0] = '0';
|
||||
buffer[1] = '.';
|
||||
for (int i = 2; i < offset; i++)
|
||||
buffer[i] = '0';
|
||||
if (length - kk > maxDecimalPlaces) {
|
||||
// When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1
|
||||
// Remove extra trailing zeros (at least one) after truncation.
|
||||
for (int i = maxDecimalPlaces + 1; i > 2; i--)
|
||||
if (buffer[i] != '0')
|
||||
return &buffer[i + 1];
|
||||
return &buffer[3]; // Reserve one zero
|
||||
}
|
||||
else
|
||||
return &buffer[length + offset];
|
||||
}
|
||||
else if (kk < -maxDecimalPlaces) {
|
||||
// Truncate to zero
|
||||
buffer[0] = '0';
|
||||
buffer[1] = '.';
|
||||
buffer[2] = '0';
|
||||
return &buffer[3];
|
||||
}
|
||||
else if (length == 1) {
|
||||
// 1e30
|
||||
buffer[1] = 'e';
|
||||
return WriteExponent(kk - 1, &buffer[2]);
|
||||
}
|
||||
else {
|
||||
// 1234e30 -> 1.234e33
|
||||
std::memmove(&buffer[2], &buffer[1], static_cast<size_t>(length - 1));
|
||||
buffer[1] = '.';
|
||||
buffer[length + 1] = 'e';
|
||||
return WriteExponent(kk - 1, &buffer[0 + length + 2]);
|
||||
}
|
||||
}
|
||||
|
||||
inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) {
|
||||
RAPIDJSON_ASSERT(maxDecimalPlaces >= 1);
|
||||
Double d(value);
|
||||
if (d.IsZero()) {
|
||||
if (d.Sign())
|
||||
*buffer++ = '-'; // -0.0, Issue #289
|
||||
buffer[0] = '0';
|
||||
buffer[1] = '.';
|
||||
buffer[2] = '0';
|
||||
return &buffer[3];
|
||||
}
|
||||
else {
|
||||
if (value < 0) {
|
||||
*buffer++ = '-';
|
||||
value = -value;
|
||||
}
|
||||
int length, K;
|
||||
Grisu2(value, buffer, &length, &K);
|
||||
return Prettify(buffer, length, K, maxDecimalPlaces);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_DTOA_
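Editor's note: a minimal usage sketch for the dtoa() routine above, not part of the vendored header. It assumes the default rapidjson namespace and an include path of "rapidjson/internal/dtoa.h". dtoa() writes the shortest decimal form that round-trips back to the same double and returns a pointer one past the last character; null-termination is left to the caller.

#include "rapidjson/internal/dtoa.h"  // assumed include path

static void DtoaSketch() {
    char buf[32];                                      // comfortably larger than any double needs
    char* end = rapidjson::internal::dtoa(3.14, buf);  // optional third argument: maxDecimalPlaces
    *end = '\0';                                       // dtoa() does not null-terminate
    // buf now holds "3.14"
}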
@@ -0,0 +1,78 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_IEEE754_
|
||||
#define RAPIDJSON_IEEE754_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
class Double {
|
||||
public:
|
||||
Double() {}
|
||||
Double(double d) : d_(d) {}
|
||||
Double(uint64_t u) : u_(u) {}
|
||||
|
||||
double Value() const { return d_; }
|
||||
uint64_t Uint64Value() const { return u_; }
|
||||
|
||||
double NextPositiveDouble() const {
|
||||
RAPIDJSON_ASSERT(!Sign());
|
||||
return Double(u_ + 1).Value();
|
||||
}
|
||||
|
||||
bool Sign() const { return (u_ & kSignMask) != 0; }
|
||||
uint64_t Significand() const { return u_ & kSignificandMask; }
|
||||
int Exponent() const { return static_cast<int>(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); }
|
||||
|
||||
bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; }
|
||||
bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; }
|
||||
bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; }
|
||||
bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; }
|
||||
bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; }
|
||||
|
||||
uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); }
|
||||
int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; }
|
||||
uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; }
|
||||
|
||||
static unsigned EffectiveSignificandSize(int order) {
|
||||
if (order >= -1021)
|
||||
return 53;
|
||||
else if (order <= -1074)
|
||||
return 0;
|
||||
else
|
||||
return static_cast<unsigned>(order) + 1074;
|
||||
}
|
||||
|
||||
private:
|
||||
static const int kSignificandSize = 52;
|
||||
static const int kExponentBias = 0x3FF;
|
||||
static const int kDenormalExponent = 1 - kExponentBias;
|
||||
static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000);
|
||||
static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
|
||||
static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
|
||||
static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);
|
||||
|
||||
union {
|
||||
double d_;
|
||||
uint64_t u_;
|
||||
};
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_IEEE754_
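Editor's note: a short sketch of how the Double wrapper above is typically used, not part of the vendored header; the default rapidjson namespace and include path are assumed. The union lets the sign, exponent, and significand fields of an IEEE-754 double be read directly from its bit pattern.

#include "rapidjson/internal/ieee754.h"  // assumed include path

static void DoubleSketch() {
    rapidjson::internal::Double d(1.0);
    bool sign = d.Sign();            // false
    int exponent = d.Exponent();     // 0 (unbiased)
    uint64_t frac = d.Significand(); // 0; IntegerSignificand() adds the hidden bit for normal values
    (void)sign; (void)exponent; (void)frac;
}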
@@ -0,0 +1,304 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ITOA_
|
||||
#define RAPIDJSON_ITOA_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
inline const char* GetDigitsLut() {
|
||||
static const char cDigitsLut[200] = {
|
||||
'0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9',
|
||||
'1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9',
|
||||
'2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9',
|
||||
'3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9',
|
||||
'4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9',
|
||||
'5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9',
|
||||
'6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9',
|
||||
'7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9',
|
||||
'8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9',
|
||||
'9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9'
|
||||
};
|
||||
return cDigitsLut;
|
||||
}
|
||||
|
||||
inline char* u32toa(uint32_t value, char* buffer) {
|
||||
const char* cDigitsLut = GetDigitsLut();
|
||||
|
||||
if (value < 10000) {
|
||||
const uint32_t d1 = (value / 100) << 1;
|
||||
const uint32_t d2 = (value % 100) << 1;
|
||||
|
||||
if (value >= 1000)
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
if (value >= 100)
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
if (value >= 10)
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
}
|
||||
else if (value < 100000000) {
|
||||
// value = bbbbcccc
|
||||
const uint32_t b = value / 10000;
|
||||
const uint32_t c = value % 10000;
|
||||
|
||||
const uint32_t d1 = (b / 100) << 1;
|
||||
const uint32_t d2 = (b % 100) << 1;
|
||||
|
||||
const uint32_t d3 = (c / 100) << 1;
|
||||
const uint32_t d4 = (c % 100) << 1;
|
||||
|
||||
if (value >= 10000000)
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
if (value >= 1000000)
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
if (value >= 100000)
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
|
||||
*buffer++ = cDigitsLut[d3];
|
||||
*buffer++ = cDigitsLut[d3 + 1];
|
||||
*buffer++ = cDigitsLut[d4];
|
||||
*buffer++ = cDigitsLut[d4 + 1];
|
||||
}
|
||||
else {
|
||||
// value = aabbbbcccc in decimal
|
||||
|
||||
const uint32_t a = value / 100000000; // 1 to 42
|
||||
value %= 100000000;
|
||||
|
||||
if (a >= 10) {
|
||||
const unsigned i = a << 1;
|
||||
*buffer++ = cDigitsLut[i];
|
||||
*buffer++ = cDigitsLut[i + 1];
|
||||
}
|
||||
else
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(a));
|
||||
|
||||
const uint32_t b = value / 10000; // 0 to 9999
|
||||
const uint32_t c = value % 10000; // 0 to 9999
|
||||
|
||||
const uint32_t d1 = (b / 100) << 1;
|
||||
const uint32_t d2 = (b % 100) << 1;
|
||||
|
||||
const uint32_t d3 = (c / 100) << 1;
|
||||
const uint32_t d4 = (c % 100) << 1;
|
||||
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
*buffer++ = cDigitsLut[d3];
|
||||
*buffer++ = cDigitsLut[d3 + 1];
|
||||
*buffer++ = cDigitsLut[d4];
|
||||
*buffer++ = cDigitsLut[d4 + 1];
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
inline char* i32toa(int32_t value, char* buffer) {
|
||||
uint32_t u = static_cast<uint32_t>(value);
|
||||
if (value < 0) {
|
||||
*buffer++ = '-';
|
||||
u = ~u + 1;
|
||||
}
|
||||
|
||||
return u32toa(u, buffer);
|
||||
}
|
||||
|
||||
inline char* u64toa(uint64_t value, char* buffer) {
|
||||
const char* cDigitsLut = GetDigitsLut();
|
||||
const uint64_t kTen8 = 100000000;
|
||||
const uint64_t kTen9 = kTen8 * 10;
|
||||
const uint64_t kTen10 = kTen8 * 100;
|
||||
const uint64_t kTen11 = kTen8 * 1000;
|
||||
const uint64_t kTen12 = kTen8 * 10000;
|
||||
const uint64_t kTen13 = kTen8 * 100000;
|
||||
const uint64_t kTen14 = kTen8 * 1000000;
|
||||
const uint64_t kTen15 = kTen8 * 10000000;
|
||||
const uint64_t kTen16 = kTen8 * kTen8;
|
||||
|
||||
if (value < kTen8) {
|
||||
uint32_t v = static_cast<uint32_t>(value);
|
||||
if (v < 10000) {
|
||||
const uint32_t d1 = (v / 100) << 1;
|
||||
const uint32_t d2 = (v % 100) << 1;
|
||||
|
||||
if (v >= 1000)
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
if (v >= 100)
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
if (v >= 10)
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
}
|
||||
else {
|
||||
// value = bbbbcccc
|
||||
const uint32_t b = v / 10000;
|
||||
const uint32_t c = v % 10000;
|
||||
|
||||
const uint32_t d1 = (b / 100) << 1;
|
||||
const uint32_t d2 = (b % 100) << 1;
|
||||
|
||||
const uint32_t d3 = (c / 100) << 1;
|
||||
const uint32_t d4 = (c % 100) << 1;
|
||||
|
||||
if (value >= 10000000)
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
if (value >= 1000000)
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
if (value >= 100000)
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
|
||||
*buffer++ = cDigitsLut[d3];
|
||||
*buffer++ = cDigitsLut[d3 + 1];
|
||||
*buffer++ = cDigitsLut[d4];
|
||||
*buffer++ = cDigitsLut[d4 + 1];
|
||||
}
|
||||
}
|
||||
else if (value < kTen16) {
|
||||
const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
|
||||
const uint32_t v1 = static_cast<uint32_t>(value % kTen8);
|
||||
|
||||
const uint32_t b0 = v0 / 10000;
|
||||
const uint32_t c0 = v0 % 10000;
|
||||
|
||||
const uint32_t d1 = (b0 / 100) << 1;
|
||||
const uint32_t d2 = (b0 % 100) << 1;
|
||||
|
||||
const uint32_t d3 = (c0 / 100) << 1;
|
||||
const uint32_t d4 = (c0 % 100) << 1;
|
||||
|
||||
const uint32_t b1 = v1 / 10000;
|
||||
const uint32_t c1 = v1 % 10000;
|
||||
|
||||
const uint32_t d5 = (b1 / 100) << 1;
|
||||
const uint32_t d6 = (b1 % 100) << 1;
|
||||
|
||||
const uint32_t d7 = (c1 / 100) << 1;
|
||||
const uint32_t d8 = (c1 % 100) << 1;
|
||||
|
||||
if (value >= kTen15)
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
if (value >= kTen14)
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
if (value >= kTen13)
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
if (value >= kTen12)
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
if (value >= kTen11)
|
||||
*buffer++ = cDigitsLut[d3];
|
||||
if (value >= kTen10)
|
||||
*buffer++ = cDigitsLut[d3 + 1];
|
||||
if (value >= kTen9)
|
||||
*buffer++ = cDigitsLut[d4];
|
||||
if (value >= kTen8)
|
||||
*buffer++ = cDigitsLut[d4 + 1];
|
||||
|
||||
*buffer++ = cDigitsLut[d5];
|
||||
*buffer++ = cDigitsLut[d5 + 1];
|
||||
*buffer++ = cDigitsLut[d6];
|
||||
*buffer++ = cDigitsLut[d6 + 1];
|
||||
*buffer++ = cDigitsLut[d7];
|
||||
*buffer++ = cDigitsLut[d7 + 1];
|
||||
*buffer++ = cDigitsLut[d8];
|
||||
*buffer++ = cDigitsLut[d8 + 1];
|
||||
}
|
||||
else {
|
||||
const uint32_t a = static_cast<uint32_t>(value / kTen16); // 1 to 1844
|
||||
value %= kTen16;
|
||||
|
||||
if (a < 10)
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(a));
|
||||
else if (a < 100) {
|
||||
const uint32_t i = a << 1;
|
||||
*buffer++ = cDigitsLut[i];
|
||||
*buffer++ = cDigitsLut[i + 1];
|
||||
}
|
||||
else if (a < 1000) {
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(a / 100));
|
||||
|
||||
const uint32_t i = (a % 100) << 1;
|
||||
*buffer++ = cDigitsLut[i];
|
||||
*buffer++ = cDigitsLut[i + 1];
|
||||
}
|
||||
else {
|
||||
const uint32_t i = (a / 100) << 1;
|
||||
const uint32_t j = (a % 100) << 1;
|
||||
*buffer++ = cDigitsLut[i];
|
||||
*buffer++ = cDigitsLut[i + 1];
|
||||
*buffer++ = cDigitsLut[j];
|
||||
*buffer++ = cDigitsLut[j + 1];
|
||||
}
|
||||
|
||||
const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
|
||||
const uint32_t v1 = static_cast<uint32_t>(value % kTen8);
|
||||
|
||||
const uint32_t b0 = v0 / 10000;
|
||||
const uint32_t c0 = v0 % 10000;
|
||||
|
||||
const uint32_t d1 = (b0 / 100) << 1;
|
||||
const uint32_t d2 = (b0 % 100) << 1;
|
||||
|
||||
const uint32_t d3 = (c0 / 100) << 1;
|
||||
const uint32_t d4 = (c0 % 100) << 1;
|
||||
|
||||
const uint32_t b1 = v1 / 10000;
|
||||
const uint32_t c1 = v1 % 10000;
|
||||
|
||||
const uint32_t d5 = (b1 / 100) << 1;
|
||||
const uint32_t d6 = (b1 % 100) << 1;
|
||||
|
||||
const uint32_t d7 = (c1 / 100) << 1;
|
||||
const uint32_t d8 = (c1 % 100) << 1;
|
||||
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
*buffer++ = cDigitsLut[d3];
|
||||
*buffer++ = cDigitsLut[d3 + 1];
|
||||
*buffer++ = cDigitsLut[d4];
|
||||
*buffer++ = cDigitsLut[d4 + 1];
|
||||
*buffer++ = cDigitsLut[d5];
|
||||
*buffer++ = cDigitsLut[d5 + 1];
|
||||
*buffer++ = cDigitsLut[d6];
|
||||
*buffer++ = cDigitsLut[d6 + 1];
|
||||
*buffer++ = cDigitsLut[d7];
|
||||
*buffer++ = cDigitsLut[d7 + 1];
|
||||
*buffer++ = cDigitsLut[d8];
|
||||
*buffer++ = cDigitsLut[d8 + 1];
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
inline char* i64toa(int64_t value, char* buffer) {
|
||||
uint64_t u = static_cast<uint64_t>(value);
|
||||
if (value < 0) {
|
||||
*buffer++ = '-';
|
||||
u = ~u + 1;
|
||||
}
|
||||
|
||||
return u64toa(u, buffer);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_ITOA_
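Editor's note: a minimal sketch for the itoa family above, not part of the vendored header; namespace and include path assumed. The routines emit digits two at a time from the 200-byte lookup table and, like dtoa(), return an end pointer without null-terminating.

#include "rapidjson/internal/itoa.h"  // assumed include path

static void ItoaSketch() {
    char buf[21];                      // 19 digits + sign + '\0' covers any int64_t
    char* end = rapidjson::internal::i64toa(-1234567890123LL, buf);
    *end = '\0';                       // caller terminates: "-1234567890123"
}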
@@ -0,0 +1,181 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_META_H_
|
||||
#define RAPIDJSON_INTERNAL_META_H_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
#if defined(_MSC_VER)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(6334)
|
||||
#endif
|
||||
|
||||
#if RAPIDJSON_HAS_CXX11_TYPETRAITS
|
||||
#include <type_traits>
|
||||
#endif
|
||||
|
||||
//@cond RAPIDJSON_INTERNAL
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching
|
||||
template <typename T> struct Void { typedef void Type; };
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// BoolType, TrueType, FalseType
|
||||
//
|
||||
template <bool Cond> struct BoolType {
|
||||
static const bool Value = Cond;
|
||||
typedef BoolType Type;
|
||||
};
|
||||
typedef BoolType<true> TrueType;
|
||||
typedef BoolType<false> FalseType;
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr
|
||||
//
|
||||
|
||||
template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; };
|
||||
template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; };
|
||||
template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1,T2> {};
|
||||
template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {};
|
||||
|
||||
template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {};
|
||||
template <> struct AndExprCond<true, true> : TrueType {};
|
||||
template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {};
|
||||
template <> struct OrExprCond<false, false> : FalseType {};
|
||||
|
||||
template <typename C> struct BoolExpr : SelectIf<C,TrueType,FalseType>::Type {};
|
||||
template <typename C> struct NotExpr : SelectIf<C,FalseType,TrueType>::Type {};
|
||||
template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {};
|
||||
template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {};
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// AddConst, MaybeAddConst, RemoveConst
|
||||
template <typename T> struct AddConst { typedef const T Type; };
|
||||
template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {};
|
||||
template <typename T> struct RemoveConst { typedef T Type; };
|
||||
template <typename T> struct RemoveConst<const T> { typedef T Type; };
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// IsSame, IsConst, IsMoreConst, IsPointer
|
||||
//
|
||||
template <typename T, typename U> struct IsSame : FalseType {};
|
||||
template <typename T> struct IsSame<T, T> : TrueType {};
|
||||
|
||||
template <typename T> struct IsConst : FalseType {};
|
||||
template <typename T> struct IsConst<const T> : TrueType {};
|
||||
|
||||
template <typename CT, typename T>
|
||||
struct IsMoreConst
|
||||
: AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>,
|
||||
BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {};
|
||||
|
||||
template <typename T> struct IsPointer : FalseType {};
|
||||
template <typename T> struct IsPointer<T*> : TrueType {};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// IsBaseOf
|
||||
//
|
||||
#if RAPIDJSON_HAS_CXX11_TYPETRAITS
|
||||
|
||||
template <typename B, typename D> struct IsBaseOf
|
||||
: BoolType< ::std::is_base_of<B,D>::value> {};
|
||||
|
||||
#else // simplified version adopted from Boost
|
||||
|
||||
template<typename B, typename D> struct IsBaseOfImpl {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0);
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0);
|
||||
|
||||
typedef char (&Yes)[1];
|
||||
typedef char (&No) [2];
|
||||
|
||||
template <typename T>
|
||||
static Yes Check(const D*, T);
|
||||
static No Check(const B*, int);
|
||||
|
||||
struct Host {
|
||||
operator const B*() const;
|
||||
operator const D*();
|
||||
};
|
||||
|
||||
enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) };
|
||||
};
|
||||
|
||||
template <typename B, typename D> struct IsBaseOf
|
||||
: OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {};
|
||||
|
||||
#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
// EnableIf / DisableIf
|
||||
//
|
||||
template <bool Condition, typename T = void> struct EnableIfCond { typedef T Type; };
|
||||
template <typename T> struct EnableIfCond<false, T> { /* empty */ };
|
||||
|
||||
template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; };
|
||||
template <typename T> struct DisableIfCond<true, T> { /* empty */ };
|
||||
|
||||
template <typename Condition, typename T = void>
|
||||
struct EnableIf : EnableIfCond<Condition::Value, T> {};
|
||||
|
||||
template <typename Condition, typename T = void>
|
||||
struct DisableIf : DisableIfCond<Condition::Value, T> {};
|
||||
|
||||
// SFINAE helpers
|
||||
struct SfinaeTag {};
|
||||
template <typename T> struct RemoveSfinaeTag;
|
||||
template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; };
|
||||
|
||||
#define RAPIDJSON_REMOVEFPTR_(type) \
|
||||
typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \
|
||||
< ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type
|
||||
|
||||
#define RAPIDJSON_ENABLEIF(cond) \
|
||||
typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
|
||||
<RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
|
||||
|
||||
#define RAPIDJSON_DISABLEIF(cond) \
|
||||
typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
|
||||
<RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
|
||||
|
||||
#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \
|
||||
typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
|
||||
<RAPIDJSON_REMOVEFPTR_(cond), \
|
||||
RAPIDJSON_REMOVEFPTR_(returntype)>::Type
|
||||
|
||||
#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \
|
||||
typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
|
||||
<RAPIDJSON_REMOVEFPTR_(cond), \
|
||||
RAPIDJSON_REMOVEFPTR_(returntype)>::Type
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
//@endcond
|
||||
|
||||
#if defined(__GNUC__) || defined(_MSC_VER)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_INTERNAL_META_H_
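Editor's note: a compile-time sketch for the meta helpers above, not part of the vendored header; namespace and include path assumed. They are a pre-C++11 stand-in for <type_traits>: SelectIfCond picks one of two types, and EnableIf/DisableIf drive SFINAE overload selection.

#include "rapidjson/internal/meta.h"  // assumed include path

typedef rapidjson::internal::SelectIfCond<true, int, double>::Type Chosen;   // Chosen is int
RAPIDJSON_STATIC_ASSERT((rapidjson::internal::IsSame<Chosen, int>::Value));  // passes
RAPIDJSON_STATIC_ASSERT((rapidjson::internal::IsPointer<char*>::Value));     // passes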
@@ -0,0 +1,55 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_POW10_
|
||||
#define RAPIDJSON_POW10_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
//! Computes integer powers of 10 in double (10.0^n).
|
||||
/*! This function uses a lookup table for fast and accurate results.
|
||||
\param n non-negative exponent. Must be <= 308.
|
||||
\return 10.0^n
|
||||
*/
|
||||
inline double Pow10(int n) {
|
||||
static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
|
||||
1e+0,
|
||||
1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
|
||||
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
|
||||
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
|
||||
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
|
||||
1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
|
||||
1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
|
||||
1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
|
||||
1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
|
||||
1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
|
||||
1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
|
||||
1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
|
||||
1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
|
||||
1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
|
||||
1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
|
||||
1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
|
||||
1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
|
||||
};
|
||||
RAPIDJSON_ASSERT(n >= 0 && n <= 308);
|
||||
return e[n];
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_POW10_
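Editor's note: Pow10() above is a plain table lookup, so it is exact for every entry and cheap enough for the hot number-parsing path; the assertion restricts n to [0, 308]. A two-line sketch, with the default rapidjson namespace assumed:

static const double kThousand = rapidjson::internal::Pow10(3);  // 1000.0, read straight from the table
// rapidjson::internal::Pow10(309);                             // out of range: trips RAPIDJSON_ASSERT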
@@ -0,0 +1,701 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_REGEX_H_
|
||||
#define RAPIDJSON_INTERNAL_REGEX_H_
|
||||
|
||||
#include "../allocators.h"
|
||||
#include "../stream.h"
|
||||
#include "stack.h"
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
RAPIDJSON_DIAG_OFF(switch-enum)
|
||||
RAPIDJSON_DIAG_OFF(implicit-fallthrough)
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
|
||||
#endif
|
||||
|
||||
#ifndef RAPIDJSON_REGEX_VERBOSE
|
||||
#define RAPIDJSON_REGEX_VERBOSE 0
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// GenericRegex
|
||||
|
||||
static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1
|
||||
static const SizeType kRegexInvalidRange = ~SizeType(0);
|
||||
|
||||
//! Regular expression engine with subset of ECMAscript grammar.
|
||||
/*!
|
||||
Supported regular expression syntax:
|
||||
- \c ab Concatenation
|
||||
- \c a|b Alternation
|
||||
- \c a? Zero or one
|
||||
- \c a* Zero or more
|
||||
- \c a+ One or more
|
||||
- \c a{3} Exactly 3 times
|
||||
- \c a{3,} At least 3 times
|
||||
- \c a{3,5} 3 to 5 times
|
||||
- \c (ab) Grouping
|
||||
- \c ^a At the beginning
|
||||
- \c a$ At the end
|
||||
- \c . Any character
|
||||
- \c [abc] Character classes
|
||||
- \c [a-c] Character class range
|
||||
- \c [a-z0-9_] Character class combination
|
||||
- \c [^abc] Negated character classes
|
||||
- \c [^a-c] Negated character class range
|
||||
- \c [\b] Backspace (U+0008)
|
||||
- \c \\| \\\\ ... Escape characters
|
||||
- \c \\f Form feed (U+000C)
|
||||
- \c \\n Line feed (U+000A)
|
||||
- \c \\r Carriage return (U+000D)
|
||||
- \c \\t Tab (U+0009)
|
||||
- \c \\v Vertical tab (U+000B)
|
||||
|
||||
\note This is a Thompson NFA engine, implemented with reference to
|
||||
Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).",
|
||||
https://swtch.com/~rsc/regexp/regexp1.html
|
||||
*/
|
||||
template <typename Encoding, typename Allocator = CrtAllocator>
|
||||
class GenericRegex {
|
||||
public:
|
||||
typedef typename Encoding::Ch Ch;
|
||||
|
||||
GenericRegex(const Ch* source, Allocator* allocator = 0) :
|
||||
states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(),
|
||||
stateSet_(), state0_(allocator, 0), state1_(allocator, 0), anchorBegin_(), anchorEnd_()
|
||||
{
|
||||
GenericStringStream<Encoding> ss(source);
|
||||
DecodedStream<GenericStringStream<Encoding> > ds(ss);
|
||||
Parse(ds);
|
||||
}
|
||||
|
||||
~GenericRegex() {
|
||||
Allocator::Free(stateSet_);
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
return root_ != kRegexInvalidState;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool Match(InputStream& is) const {
|
||||
return SearchWithAnchoring(is, true, true);
|
||||
}
|
||||
|
||||
bool Match(const Ch* s) const {
|
||||
GenericStringStream<Encoding> is(s);
|
||||
return Match(is);
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool Search(InputStream& is) const {
|
||||
return SearchWithAnchoring(is, anchorBegin_, anchorEnd_);
|
||||
}
|
||||
|
||||
bool Search(const Ch* s) const {
|
||||
GenericStringStream<Encoding> is(s);
|
||||
return Search(is);
|
||||
}
|
||||
|
||||
private:
|
||||
enum Operator {
|
||||
kZeroOrOne,
|
||||
kZeroOrMore,
|
||||
kOneOrMore,
|
||||
kConcatenation,
|
||||
kAlternation,
|
||||
kLeftParenthesis
|
||||
};
|
||||
|
||||
static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.'
|
||||
static const unsigned kRangeCharacterClass = 0xFFFFFFFE;
|
||||
static const unsigned kRangeNegationFlag = 0x80000000;
|
||||
|
||||
struct Range {
|
||||
unsigned start; //
|
||||
unsigned end;
|
||||
SizeType next;
|
||||
};
|
||||
|
||||
struct State {
|
||||
SizeType out; //!< Equals kRegexInvalidState for a matching state
|
||||
SizeType out1; //!< Not equal to kRegexInvalidState for a split state
|
||||
SizeType rangeStart;
|
||||
unsigned codepoint;
|
||||
};
|
||||
|
||||
struct Frag {
|
||||
Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {}
|
||||
SizeType start;
|
||||
SizeType out; //!< link-list of all output states
|
||||
SizeType minIndex;
|
||||
};
|
||||
|
||||
template <typename SourceStream>
|
||||
class DecodedStream {
|
||||
public:
|
||||
DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); }
|
||||
unsigned Peek() { return codepoint_; }
|
||||
unsigned Take() {
|
||||
unsigned c = codepoint_;
|
||||
if (c) // No further decoding when '\0'
|
||||
Decode();
|
||||
return c;
|
||||
}
|
||||
|
||||
private:
|
||||
void Decode() {
|
||||
if (!Encoding::Decode(ss_, &codepoint_))
|
||||
codepoint_ = 0;
|
||||
}
|
||||
|
||||
SourceStream& ss_;
|
||||
unsigned codepoint_;
|
||||
};
|
||||
|
||||
State& GetState(SizeType index) {
|
||||
RAPIDJSON_ASSERT(index < stateCount_);
|
||||
return states_.template Bottom<State>()[index];
|
||||
}
|
||||
|
||||
const State& GetState(SizeType index) const {
|
||||
RAPIDJSON_ASSERT(index < stateCount_);
|
||||
return states_.template Bottom<State>()[index];
|
||||
}
|
||||
|
||||
Range& GetRange(SizeType index) {
|
||||
RAPIDJSON_ASSERT(index < rangeCount_);
|
||||
return ranges_.template Bottom<Range>()[index];
|
||||
}
|
||||
|
||||
const Range& GetRange(SizeType index) const {
|
||||
RAPIDJSON_ASSERT(index < rangeCount_);
|
||||
return ranges_.template Bottom<Range>()[index];
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
void Parse(DecodedStream<InputStream>& ds) {
|
||||
Allocator allocator;
|
||||
Stack<Allocator> operandStack(&allocator, 256); // Frag
|
||||
Stack<Allocator> operatorStack(&allocator, 256); // Operator
|
||||
Stack<Allocator> atomCountStack(&allocator, 256); // unsigned (Atom per parenthesis)
|
||||
|
||||
*atomCountStack.template Push<unsigned>() = 0;
|
||||
|
||||
unsigned codepoint;
|
||||
while (ds.Peek() != 0) {
|
||||
switch (codepoint = ds.Take()) {
|
||||
case '^':
|
||||
anchorBegin_ = true;
|
||||
break;
|
||||
|
||||
case '$':
|
||||
anchorEnd_ = true;
|
||||
break;
|
||||
|
||||
case '|':
|
||||
while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() < kAlternation)
|
||||
if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
|
||||
return;
|
||||
*operatorStack.template Push<Operator>() = kAlternation;
|
||||
*atomCountStack.template Top<unsigned>() = 0;
|
||||
break;
|
||||
|
||||
case '(':
|
||||
*operatorStack.template Push<Operator>() = kLeftParenthesis;
|
||||
*atomCountStack.template Push<unsigned>() = 0;
|
||||
break;
|
||||
|
||||
case ')':
|
||||
while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() != kLeftParenthesis)
|
||||
if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
|
||||
return;
|
||||
if (operatorStack.Empty())
|
||||
return;
|
||||
operatorStack.template Pop<Operator>(1);
|
||||
atomCountStack.template Pop<unsigned>(1);
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
break;
|
||||
|
||||
case '?':
|
||||
if (!Eval(operandStack, kZeroOrOne))
|
||||
return;
|
||||
break;
|
||||
|
||||
case '*':
|
||||
if (!Eval(operandStack, kZeroOrMore))
|
||||
return;
|
||||
break;
|
||||
|
||||
case '+':
|
||||
if (!Eval(operandStack, kOneOrMore))
|
||||
return;
|
||||
break;
|
||||
|
||||
case '{':
|
||||
{
|
||||
unsigned n, m;
|
||||
if (!ParseUnsigned(ds, &n))
|
||||
return;
|
||||
|
||||
if (ds.Peek() == ',') {
|
||||
ds.Take();
|
||||
if (ds.Peek() == '}')
|
||||
m = kInfinityQuantifier;
|
||||
else if (!ParseUnsigned(ds, &m) || m < n)
|
||||
return;
|
||||
}
|
||||
else
|
||||
m = n;
|
||||
|
||||
if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}')
|
||||
return;
|
||||
ds.Take();
|
||||
}
|
||||
break;
|
||||
|
||||
case '.':
|
||||
PushOperand(operandStack, kAnyCharacterClass);
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
break;
|
||||
|
||||
case '[':
|
||||
{
|
||||
SizeType range;
|
||||
if (!ParseRange(ds, &range))
|
||||
return;
|
||||
SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass);
|
||||
GetState(s).rangeStart = range;
|
||||
*operandStack.template Push<Frag>() = Frag(s, s, s);
|
||||
}
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
break;
|
||||
|
||||
case '\\': // Escape character
|
||||
if (!CharacterEscape(ds, &codepoint))
|
||||
return; // Unsupported escape character
|
||||
// fall through to default
|
||||
|
||||
default: // Pattern character
|
||||
PushOperand(operandStack, codepoint);
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
}
|
||||
}
|
||||
|
||||
while (!operatorStack.Empty())
|
||||
if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
|
||||
return;
|
||||
|
||||
// Link the operand to matching state.
|
||||
if (operandStack.GetSize() == sizeof(Frag)) {
|
||||
Frag* e = operandStack.template Pop<Frag>(1);
|
||||
Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0));
|
||||
root_ = e->start;
|
||||
|
||||
#if RAPIDJSON_REGEX_VERBOSE
|
||||
printf("root: %d\n", root_);
|
||||
for (SizeType i = 0; i < stateCount_ ; i++) {
|
||||
State& s = GetState(i);
|
||||
printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint);
|
||||
}
|
||||
printf("\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
// Preallocate buffer for SearchWithAnchoring()
|
||||
RAPIDJSON_ASSERT(stateSet_ == 0);
|
||||
if (stateCount_ > 0) {
|
||||
stateSet_ = static_cast<unsigned*>(states_.GetAllocator().Malloc(GetStateSetSize()));
|
||||
state0_.template Reserve<SizeType>(stateCount_);
|
||||
state1_.template Reserve<SizeType>(stateCount_);
|
||||
}
|
||||
}
|
||||
|
||||
SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) {
|
||||
State* s = states_.template Push<State>();
|
||||
s->out = out;
|
||||
s->out1 = out1;
|
||||
s->codepoint = codepoint;
|
||||
s->rangeStart = kRegexInvalidRange;
|
||||
return stateCount_++;
|
||||
}
|
||||
|
||||
void PushOperand(Stack<Allocator>& operandStack, unsigned codepoint) {
|
||||
SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint);
|
||||
*operandStack.template Push<Frag>() = Frag(s, s, s);
|
||||
}
|
||||
|
||||
void ImplicitConcatenation(Stack<Allocator>& atomCountStack, Stack<Allocator>& operatorStack) {
|
||||
if (*atomCountStack.template Top<unsigned>())
|
||||
*operatorStack.template Push<Operator>() = kConcatenation;
|
||||
(*atomCountStack.template Top<unsigned>())++;
|
||||
}
|
||||
|
||||
SizeType Append(SizeType l1, SizeType l2) {
|
||||
SizeType old = l1;
|
||||
while (GetState(l1).out != kRegexInvalidState)
|
||||
l1 = GetState(l1).out;
|
||||
GetState(l1).out = l2;
|
||||
return old;
|
||||
}
|
||||
|
||||
void Patch(SizeType l, SizeType s) {
|
||||
for (SizeType next; l != kRegexInvalidState; l = next) {
|
||||
next = GetState(l).out;
|
||||
GetState(l).out = s;
|
||||
}
|
||||
}
|
||||
|
||||
bool Eval(Stack<Allocator>& operandStack, Operator op) {
|
||||
switch (op) {
|
||||
case kConcatenation:
|
||||
RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2);
|
||||
{
|
||||
Frag e2 = *operandStack.template Pop<Frag>(1);
|
||||
Frag e1 = *operandStack.template Pop<Frag>(1);
|
||||
Patch(e1.out, e2.start);
|
||||
*operandStack.template Push<Frag>() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex));
|
||||
}
|
||||
return true;
|
||||
|
||||
case kAlternation:
|
||||
if (operandStack.GetSize() >= sizeof(Frag) * 2) {
|
||||
Frag e2 = *operandStack.template Pop<Frag>(1);
|
||||
Frag e1 = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(e1.start, e2.start, 0);
|
||||
*operandStack.template Push<Frag>() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
case kZeroOrOne:
|
||||
if (operandStack.GetSize() >= sizeof(Frag)) {
|
||||
Frag e = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(kRegexInvalidState, e.start, 0);
|
||||
*operandStack.template Push<Frag>() = Frag(s, Append(e.out, s), e.minIndex);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
case kZeroOrMore:
|
||||
if (operandStack.GetSize() >= sizeof(Frag)) {
|
||||
Frag e = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(kRegexInvalidState, e.start, 0);
|
||||
Patch(e.out, s);
|
||||
*operandStack.template Push<Frag>() = Frag(s, s, e.minIndex);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
default:
|
||||
RAPIDJSON_ASSERT(op == kOneOrMore);
|
||||
if (operandStack.GetSize() >= sizeof(Frag)) {
|
||||
Frag e = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(kRegexInvalidState, e.start, 0);
|
||||
Patch(e.out, s);
|
||||
*operandStack.template Push<Frag>() = Frag(e.start, s, e.minIndex);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool EvalQuantifier(Stack<Allocator>& operandStack, unsigned n, unsigned m) {
|
||||
RAPIDJSON_ASSERT(n <= m);
|
||||
RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag));
|
||||
|
||||
if (n == 0) {
|
||||
if (m == 0) // a{0} is not supported
|
||||
return false;
|
||||
else if (m == kInfinityQuantifier)
|
||||
Eval(operandStack, kZeroOrMore); // a{0,} -> a*
|
||||
else {
|
||||
Eval(operandStack, kZeroOrOne); // a{0,5} -> a?
|
||||
for (unsigned i = 0; i < m - 1; i++)
|
||||
CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a?
|
||||
for (unsigned i = 0; i < m - 1; i++)
|
||||
Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a?
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a
|
||||
CloneTopOperand(operandStack);
|
||||
|
||||
if (m == kInfinityQuantifier)
|
||||
Eval(operandStack, kOneOrMore); // a{3,} -> a a a+
|
||||
else if (m > n) {
|
||||
CloneTopOperand(operandStack); // a{3,5} -> a a a a
|
||||
Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a?
|
||||
for (unsigned i = n; i < m - 1; i++)
|
||||
CloneTopOperand(operandStack); // a{3,5} -> a a a a? a?
|
||||
for (unsigned i = n; i < m; i++)
|
||||
Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a?
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < n - 1; i++)
|
||||
Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3,5} -> aaaa?a?
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; }
|
||||
|
||||
void CloneTopOperand(Stack<Allocator>& operandStack) {
|
||||
const Frag src = *operandStack.template Top<Frag>(); // Copy constructor to prevent invalidation
|
||||
SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src.minIndex, stateCount_)
|
||||
State* s = states_.template Push<State>(count);
|
||||
memcpy(s, &GetState(src.minIndex), count * sizeof(State));
|
||||
for (SizeType j = 0; j < count; j++) {
|
||||
if (s[j].out != kRegexInvalidState)
|
||||
s[j].out += count;
|
||||
if (s[j].out1 != kRegexInvalidState)
|
||||
s[j].out1 += count;
|
||||
}
|
||||
*operandStack.template Push<Frag>() = Frag(src.start + count, src.out + count, src.minIndex + count);
|
||||
stateCount_ += count;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool ParseUnsigned(DecodedStream<InputStream>& ds, unsigned* u) {
|
||||
unsigned r = 0;
|
||||
if (ds.Peek() < '0' || ds.Peek() > '9')
|
||||
return false;
|
||||
while (ds.Peek() >= '0' && ds.Peek() <= '9') {
|
||||
if (r >= 429496729 && ds.Peek() > '5') // 2^32 - 1 = 4294967295
|
||||
return false; // overflow
|
||||
r = r * 10 + (ds.Take() - '0');
|
||||
}
|
||||
*u = r;
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool ParseRange(DecodedStream<InputStream>& ds, SizeType* range) {
|
||||
bool isBegin = true;
|
||||
bool negate = false;
|
||||
int step = 0;
|
||||
SizeType start = kRegexInvalidRange;
|
||||
SizeType current = kRegexInvalidRange;
|
||||
unsigned codepoint;
|
||||
while ((codepoint = ds.Take()) != 0) {
|
||||
if (isBegin) {
|
||||
isBegin = false;
|
||||
if (codepoint == '^') {
|
||||
negate = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
switch (codepoint) {
|
||||
case ']':
|
||||
if (start == kRegexInvalidRange)
|
||||
return false; // Error: nothing inside []
|
||||
if (step == 2) { // Add trailing '-'
|
||||
SizeType r = NewRange('-');
|
||||
RAPIDJSON_ASSERT(current != kRegexInvalidRange);
|
||||
GetRange(current).next = r;
|
||||
}
|
||||
if (negate)
|
||||
GetRange(start).start |= kRangeNegationFlag;
|
||||
*range = start;
|
||||
return true;
|
||||
|
||||
case '\\':
|
||||
if (ds.Peek() == 'b') {
|
||||
ds.Take();
|
||||
codepoint = 0x0008; // Escape backspace character
|
||||
}
|
||||
else if (!CharacterEscape(ds, &codepoint))
|
||||
return false;
|
||||
// fall through to default
|
||||
|
||||
default:
|
||||
switch (step) {
|
||||
case 1:
|
||||
if (codepoint == '-') {
|
||||
step++;
|
||||
break;
|
||||
}
|
||||
// fall through to step 0 for other characters
|
||||
|
||||
case 0:
|
||||
{
|
||||
SizeType r = NewRange(codepoint);
|
||||
if (current != kRegexInvalidRange)
|
||||
GetRange(current).next = r;
|
||||
if (start == kRegexInvalidRange)
|
||||
start = r;
|
||||
current = r;
|
||||
}
|
||||
step = 1;
|
||||
break;
|
||||
|
||||
default:
|
||||
RAPIDJSON_ASSERT(step == 2);
|
||||
GetRange(current).end = codepoint;
|
||||
step = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
SizeType NewRange(unsigned codepoint) {
|
||||
Range* r = ranges_.template Push<Range>();
|
||||
r->start = r->end = codepoint;
|
||||
r->next = kRegexInvalidRange;
|
||||
return rangeCount_++;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool CharacterEscape(DecodedStream<InputStream>& ds, unsigned* escapedCodepoint) {
|
||||
unsigned codepoint;
|
||||
switch (codepoint = ds.Take()) {
|
||||
case '^':
|
||||
case '$':
|
||||
case '|':
|
||||
case '(':
|
||||
case ')':
|
||||
case '?':
|
||||
case '*':
|
||||
case '+':
|
||||
case '.':
|
||||
case '[':
|
||||
case ']':
|
||||
case '{':
|
||||
case '}':
|
||||
case '\\':
|
||||
*escapedCodepoint = codepoint; return true;
|
||||
case 'f': *escapedCodepoint = 0x000C; return true;
|
||||
case 'n': *escapedCodepoint = 0x000A; return true;
|
||||
case 'r': *escapedCodepoint = 0x000D; return true;
|
||||
case 't': *escapedCodepoint = 0x0009; return true;
|
||||
case 'v': *escapedCodepoint = 0x000B; return true;
|
||||
default:
|
||||
return false; // Unsupported escape character
|
||||
}
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) const {
|
||||
RAPIDJSON_ASSERT(IsValid());
|
||||
DecodedStream<InputStream> ds(is);
|
||||
|
||||
state0_.Clear();
|
||||
Stack<Allocator> *current = &state0_, *next = &state1_;
|
||||
const size_t stateSetSize = GetStateSetSize();
|
||||
std::memset(stateSet_, 0, stateSetSize);
|
||||
|
||||
bool matched = AddState(*current, root_);
|
||||
unsigned codepoint;
|
||||
while (!current->Empty() && (codepoint = ds.Take()) != 0) {
|
||||
std::memset(stateSet_, 0, stateSetSize);
|
||||
next->Clear();
|
||||
matched = false;
|
||||
for (const SizeType* s = current->template Bottom<SizeType>(); s != current->template End<SizeType>(); ++s) {
|
||||
const State& sr = GetState(*s);
|
||||
if (sr.codepoint == codepoint ||
|
||||
sr.codepoint == kAnyCharacterClass ||
|
||||
(sr.codepoint == kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint)))
|
||||
{
|
||||
matched = AddState(*next, sr.out) || matched;
|
||||
if (!anchorEnd && matched)
|
||||
return true;
|
||||
}
|
||||
if (!anchorBegin)
|
||||
AddState(*next, root_);
|
||||
}
|
||||
internal::Swap(current, next);
|
||||
}
|
||||
|
||||
return matched;
|
||||
}
|
||||
|
||||
size_t GetStateSetSize() const {
|
||||
return (stateCount_ + 31) / 32 * 4;
|
||||
}
|
||||
|
||||
// Returns whether any of the added states is a match state
|
||||
bool AddState(Stack<Allocator>& l, SizeType index) const {
|
||||
RAPIDJSON_ASSERT(index != kRegexInvalidState);
|
||||
|
||||
const State& s = GetState(index);
|
||||
if (s.out1 != kRegexInvalidState) { // Split
|
||||
bool matched = AddState(l, s.out);
|
||||
return AddState(l, s.out1) || matched;
|
||||
}
|
||||
else if (!(stateSet_[index >> 5] & (1 << (index & 31)))) {
|
||||
stateSet_[index >> 5] |= (1 << (index & 31));
|
||||
*l.template PushUnsafe<SizeType>() = index;
|
||||
}
|
||||
return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not invalidated by reallocation.
|
||||
}
|
||||
|
||||
bool MatchRange(SizeType rangeIndex, unsigned codepoint) const {
|
||||
bool yes = (GetRange(rangeIndex).start & kRangeNegationFlag) == 0;
|
||||
while (rangeIndex != kRegexInvalidRange) {
|
||||
const Range& r = GetRange(rangeIndex);
|
||||
if (codepoint >= (r.start & ~kRangeNegationFlag) && codepoint <= r.end)
|
||||
return yes;
|
||||
rangeIndex = r.next;
|
||||
}
|
||||
return !yes;
|
||||
}
|
||||
|
||||
Stack<Allocator> states_;
|
||||
Stack<Allocator> ranges_;
|
||||
SizeType root_;
|
||||
SizeType stateCount_;
|
||||
SizeType rangeCount_;
|
||||
|
||||
static const unsigned kInfinityQuantifier = ~0u;
|
||||
|
||||
// For SearchWithAnchoring()
|
||||
uint32_t* stateSet_; // allocated by states_.GetAllocator()
|
||||
mutable Stack<Allocator> state0_;
|
||||
mutable Stack<Allocator> state1_;
|
||||
bool anchorBegin_;
|
||||
bool anchorEnd_;
|
||||
};
|
||||
|
||||
typedef GenericRegex<UTF8<> > Regex;
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_INTERNAL_REGEX_H_
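Editor's note: a usage sketch for the NFA engine above, not part of the vendored header; namespace and include path assumed. The pattern is compiled into states in the constructor, Match() implicitly anchors at both ends, and Search() anchors only where ^ or $ appear in the pattern.

#include "rapidjson/internal/regex.h"  // assumed include path

static void RegexSketch() {
    rapidjson::internal::Regex re("a[0-9]+");  // UTF-8 specialization of GenericRegex
    if (re.IsValid()) {
        bool whole = re.Match("a42");          // true: the entire input matches
        bool inside = re.Search("xxa42yy");    // true: the pattern occurs somewhere inside
        (void)whole; (void)inside;
    }
}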
@@ -0,0 +1,230 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_STACK_H_
|
||||
#define RAPIDJSON_INTERNAL_STACK_H_
|
||||
|
||||
#include "../allocators.h"
|
||||
#include "swap.h"
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(c++98-compat)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Stack
|
||||
|
||||
//! A type-unsafe stack for storing different types of data.
|
||||
/*! \tparam Allocator Allocator for allocating stack memory.
|
||||
*/
|
||||
template <typename Allocator>
|
||||
class Stack {
|
||||
public:
|
||||
// Optimization note: Do not allocate memory for stack_ in constructor.
|
||||
// Do it lazily when first Push() -> Expand() -> Resize().
|
||||
Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) {
|
||||
}
|
||||
|
||||
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
Stack(Stack&& rhs)
|
||||
: allocator_(rhs.allocator_),
|
||||
ownAllocator_(rhs.ownAllocator_),
|
||||
stack_(rhs.stack_),
|
||||
stackTop_(rhs.stackTop_),
|
||||
stackEnd_(rhs.stackEnd_),
|
||||
initialCapacity_(rhs.initialCapacity_)
|
||||
{
|
||||
rhs.allocator_ = 0;
|
||||
rhs.ownAllocator_ = 0;
|
||||
rhs.stack_ = 0;
|
||||
rhs.stackTop_ = 0;
|
||||
rhs.stackEnd_ = 0;
|
||||
rhs.initialCapacity_ = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
~Stack() {
|
||||
Destroy();
|
||||
}
|
||||
|
||||
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
Stack& operator=(Stack&& rhs) {
|
||||
if (&rhs != this)
|
||||
{
|
||||
Destroy();
|
||||
|
||||
allocator_ = rhs.allocator_;
|
||||
ownAllocator_ = rhs.ownAllocator_;
|
||||
stack_ = rhs.stack_;
|
||||
stackTop_ = rhs.stackTop_;
|
||||
stackEnd_ = rhs.stackEnd_;
|
||||
initialCapacity_ = rhs.initialCapacity_;
|
||||
|
||||
rhs.allocator_ = 0;
|
||||
rhs.ownAllocator_ = 0;
|
||||
rhs.stack_ = 0;
|
||||
rhs.stackTop_ = 0;
|
||||
rhs.stackEnd_ = 0;
|
||||
rhs.initialCapacity_ = 0;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
|
||||
void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT {
|
||||
internal::Swap(allocator_, rhs.allocator_);
|
||||
internal::Swap(ownAllocator_, rhs.ownAllocator_);
|
||||
internal::Swap(stack_, rhs.stack_);
|
||||
internal::Swap(stackTop_, rhs.stackTop_);
|
||||
internal::Swap(stackEnd_, rhs.stackEnd_);
|
||||
internal::Swap(initialCapacity_, rhs.initialCapacity_);
|
||||
}
|
||||
|
||||
void Clear() { stackTop_ = stack_; }
|
||||
|
||||
void ShrinkToFit() {
|
||||
if (Empty()) {
|
||||
// If the stack is empty, completely deallocate the memory.
|
||||
Allocator::Free(stack_);
|
||||
stack_ = 0;
|
||||
stackTop_ = 0;
|
||||
stackEnd_ = 0;
|
||||
}
|
||||
else
|
||||
Resize(GetSize());
|
||||
}
|
||||
|
||||
// Optimization note: try to minimize the size of this function so that it can be force-inlined.
|
||||
// Expansion is run very infrequently, so it is moved to another (probably non-inline) function.
|
||||
template<typename T>
|
||||
RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) {
|
||||
// Expand the stack if needed
|
||||
if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_))
|
||||
Expand<T>(count);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) {
|
||||
Reserve<T>(count);
|
||||
return PushUnsafe<T>(count);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) {
|
||||
RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_);
|
||||
T* ret = reinterpret_cast<T*>(stackTop_);
|
||||
stackTop_ += sizeof(T) * count;
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T* Pop(size_t count) {
|
||||
RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T));
|
||||
stackTop_ -= count * sizeof(T);
|
||||
return reinterpret_cast<T*>(stackTop_);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T* Top() {
|
||||
RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
|
||||
return reinterpret_cast<T*>(stackTop_ - sizeof(T));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
const T* Top() const {
|
||||
RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
|
||||
return reinterpret_cast<T*>(stackTop_ - sizeof(T));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T* End() { return reinterpret_cast<T*>(stackTop_); }
|
||||
|
||||
template<typename T>
|
||||
const T* End() const { return reinterpret_cast<T*>(stackTop_); }
|
||||
|
||||
template<typename T>
|
||||
T* Bottom() { return reinterpret_cast<T*>(stack_); }
|
||||
|
||||
template<typename T>
|
||||
const T* Bottom() const { return reinterpret_cast<T*>(stack_); }
|
||||
|
||||
bool HasAllocator() const {
|
||||
return allocator_ != 0;
|
||||
}
|
||||
|
||||
Allocator& GetAllocator() {
|
||||
RAPIDJSON_ASSERT(allocator_);
|
||||
return *allocator_;
|
||||
}
|
||||
|
||||
bool Empty() const { return stackTop_ == stack_; }
|
||||
size_t GetSize() const { return static_cast<size_t>(stackTop_ - stack_); }
|
||||
size_t GetCapacity() const { return static_cast<size_t>(stackEnd_ - stack_); }
|
||||
|
||||
private:
|
||||
template<typename T>
|
||||
void Expand(size_t count) {
|
||||
// Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity.
|
||||
size_t newCapacity;
|
||||
if (stack_ == 0) {
|
||||
if (!allocator_)
|
||||
ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator());
|
||||
newCapacity = initialCapacity_;
|
||||
} else {
|
||||
newCapacity = GetCapacity();
|
||||
newCapacity += (newCapacity + 1) / 2;
|
||||
}
|
||||
size_t newSize = GetSize() + sizeof(T) * count;
|
||||
if (newCapacity < newSize)
|
||||
newCapacity = newSize;
|
||||
|
||||
Resize(newCapacity);
|
||||
}
|
||||
|
||||
void Resize(size_t newCapacity) {
|
||||
const size_t size = GetSize(); // Backup the current size
|
||||
stack_ = static_cast<char*>(allocator_->Realloc(stack_, GetCapacity(), newCapacity));
|
||||
stackTop_ = stack_ + size;
|
||||
stackEnd_ = stack_ + newCapacity;
|
||||
}
|
||||
|
||||
void Destroy() {
|
||||
Allocator::Free(stack_);
|
||||
RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack
|
||||
}
|
||||
|
||||
// Prohibit copy constructor & assignment operator.
|
||||
Stack(const Stack&);
|
||||
Stack& operator=(const Stack&);
|
||||
|
||||
Allocator* allocator_;
|
||||
Allocator* ownAllocator_;
|
||||
char *stack_;
|
||||
char *stackTop_;
|
||||
char *stackEnd_;
|
||||
size_t initialCapacity_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_STACK_H_
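Editor's note: a minimal sketch for the Stack above, not part of the vendored header; namespace and include path assumed. It is a type-unsafe byte stack: Push<T>() reserves sizeof(T) bytes and returns a typed pointer, Pop<T>(n) releases n elements, and no memory is allocated until the first push.

#include "rapidjson/internal/stack.h"  // assumed include path

static void StackSketch() {
    rapidjson::CrtAllocator alloc;
    rapidjson::internal::Stack<rapidjson::CrtAllocator> s(&alloc, 256);
    *s.Push<int>() = 42;          // reserve space for one int and write it
    int v = *s.Pop<int>(1);       // read it back; the stack is empty again
    (void)v;
}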
@@ -0,0 +1,58 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_
|
||||
#define RAPIDJSON_INTERNAL_STRFUNC_H_
|
||||
|
||||
#include "../stream.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
//! Custom strlen() which works on different character types.
|
||||
/*! \tparam Ch Character type (e.g. char, wchar_t, short)
|
||||
\param s Null-terminated input string.
|
||||
\return Number of characters in the string.
|
||||
\note This has the same semantics as strlen(), the return value is not number of Unicode codepoints.
|
||||
*/
|
||||
template <typename Ch>
|
||||
inline SizeType StrLen(const Ch* s) {
|
||||
RAPIDJSON_ASSERT(s != 0);
|
||||
const Ch* p = s;
|
||||
while (*p) ++p;
|
||||
return SizeType(p - s);
|
||||
}
|
||||
|
||||
//! Returns number of code points in a encoded string.
|
||||
template<typename Encoding>
|
||||
bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) {
|
||||
RAPIDJSON_ASSERT(s != 0);
|
||||
RAPIDJSON_ASSERT(outCount != 0);
|
||||
GenericStringStream<Encoding> is(s);
|
||||
const typename Encoding::Ch* end = s + length;
|
||||
SizeType count = 0;
|
||||
while (is.src_ < end) {
|
||||
unsigned codepoint;
|
||||
if (!Encoding::Decode(is, &codepoint))
|
||||
return false;
|
||||
count++;
|
||||
}
|
||||
*outCount = count;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_INTERNAL_STRFUNC_H_
|
|
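// Usage sketch (not part of the committed header): StrLen() mirrors strlen() for any
// character type, while CountStringCodePoint() walks the encoding to count Unicode code
// points. The include paths and the UTF8<> encoding choice are assumptions for illustration.
#include "rapidjson/internal/strfunc.h"
#include "rapidjson/encodings.h"
#include <cstdio>

int main() {
    using namespace rapidjson;
    const char* s = "caf\xC3\xA9";                          // "cafe" with an accented e, UTF-8 encoded (5 bytes)
    SizeType units = internal::StrLen(s);                   // 5 code units
    SizeType points = 0;
    bool ok = internal::CountStringCodePoint<UTF8<> >(s, units, &points);  // 4 code points
    std::printf("units=%u points=%u ok=%d\n", unsigned(units), unsigned(points), int(ok));
    return 0;
}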
@ -0,0 +1,269 @@
|
|||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_STRTOD_
|
||||
#define RAPIDJSON_STRTOD_
|
||||
|
||||
#include "ieee754.h"
|
||||
#include "biginteger.h"
|
||||
#include "diyfp.h"
|
||||
#include "pow10.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
inline double FastPath(double significand, int exp) {
|
||||
if (exp < -308)
|
||||
return 0.0;
|
||||
else if (exp >= 0)
|
||||
return significand * internal::Pow10(exp);
|
||||
else
|
||||
return significand / internal::Pow10(-exp);
|
||||
}
|
||||
|
||||
inline double StrtodNormalPrecision(double d, int p) {
|
||||
if (p < -308) {
|
||||
// Prevent expSum < -308, making Pow10(p) = 0
|
||||
d = FastPath(d, -308);
|
||||
d = FastPath(d, p + 308);
|
||||
}
|
||||
else
|
||||
d = FastPath(d, p);
|
||||
return d;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline T Min3(T a, T b, T c) {
|
||||
T m = a;
|
||||
if (m > b) m = b;
|
||||
if (m > c) m = c;
|
||||
return m;
|
||||
}
|
||||
|
||||
inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
|
||||
const Double db(b);
|
||||
const uint64_t bInt = db.IntegerSignificand();
|
||||
const int bExp = db.IntegerExponent();
|
||||
const int hExp = bExp - 1;
|
||||
|
||||
int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
|
||||
|
||||
// Adjust for decimal exponent
|
||||
if (dExp >= 0) {
|
||||
dS_Exp2 += dExp;
|
||||
dS_Exp5 += dExp;
|
||||
}
|
||||
else {
|
||||
bS_Exp2 -= dExp;
|
||||
bS_Exp5 -= dExp;
|
||||
hS_Exp2 -= dExp;
|
||||
hS_Exp5 -= dExp;
|
||||
}
|
||||
|
||||
// Adjust for binary exponent
|
||||
if (bExp >= 0)
|
||||
bS_Exp2 += bExp;
|
||||
else {
|
||||
dS_Exp2 -= bExp;
|
||||
hS_Exp2 -= bExp;
|
||||
}
|
||||
|
||||
// Adjust for half ulp exponent
|
||||
if (hExp >= 0)
|
||||
hS_Exp2 += hExp;
|
||||
else {
|
||||
dS_Exp2 -= hExp;
|
||||
bS_Exp2 -= hExp;
|
||||
}
|
||||
|
||||
// Remove common power of two factor from all three scaled values
|
||||
int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
|
||||
dS_Exp2 -= common_Exp2;
|
||||
bS_Exp2 -= common_Exp2;
|
||||
hS_Exp2 -= common_Exp2;
|
||||
|
||||
BigInteger dS = d;
|
||||
dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
|
||||
|
||||
BigInteger bS(bInt);
|
||||
bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
|
||||
|
||||
BigInteger hS(1);
|
||||
hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
|
||||
|
||||
BigInteger delta(0);
|
||||
dS.Difference(bS, &delta);
|
||||
|
||||
return delta.Compare(hS);
|
||||
}
|
||||
|
||||
inline bool StrtodFast(double d, int p, double* result) {
|
||||
// Use fast path for string-to-double conversion if possible
|
||||
// see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
|
||||
if (p > 22 && p < 22 + 16) {
|
||||
// Fast Path Cases In Disguise
|
||||
d *= internal::Pow10(p - 22);
|
||||
p = 22;
|
||||
}
|
||||
|
||||
if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
|
||||
*result = FastPath(d, p);
|
||||
return true;
|
||||
}
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
// Compute an approximation and see if it is within 1/2 ULP
|
||||
inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) {
|
||||
uint64_t significand = 0;
|
||||
size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
|
||||
for (; i < length; i++) {
|
||||
if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
|
||||
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
|
||||
break;
|
||||
significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
|
||||
}
|
||||
|
||||
if (i < length && decimals[i] >= '5') // Rounding
|
||||
significand++;
|
||||
|
||||
size_t remaining = length - i;
|
||||
const unsigned kUlpShift = 3;
|
||||
const unsigned kUlp = 1 << kUlpShift;
|
||||
int64_t error = (remaining == 0) ? 0 : kUlp / 2;
|
||||
|
||||
DiyFp v(significand, 0);
|
||||
v = v.Normalize();
|
||||
error <<= -v.e;
|
||||
|
||||
const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(i) + exp;
|
||||
|
||||
int actualExp;
|
||||
DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
|
||||
if (actualExp != dExp) {
|
||||
static const DiyFp kPow10[] = {
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60), // 10^1
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57), // 10^2
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54), // 10^3
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50), // 10^4
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47), // 10^5
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44), // 10^6
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40) // 10^7
|
||||
};
|
||||
int adjustment = dExp - actualExp - 1;
|
||||
RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7);
|
||||
v = v * kPow10[adjustment];
|
||||
if (length + static_cast<unsigned>(adjustment)> 19u) // has more digits than decimal digits in 64-bit
|
||||
error += kUlp / 2;
|
||||
}
|
||||
|
||||
v = v * cachedPower;
|
||||
|
||||
error += kUlp + (error == 0 ? 0 : 1);
|
||||
|
||||
const int oldExp = v.e;
|
||||
v = v.Normalize();
|
||||
error <<= oldExp - v.e;
|
||||
|
||||
const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
|
||||
unsigned precisionSize = 64 - effectiveSignificandSize;
|
||||
if (precisionSize + kUlpShift >= 64) {
|
||||
unsigned scaleExp = (precisionSize + kUlpShift) - 63;
|
||||
v.f >>= scaleExp;
|
||||
v.e += scaleExp;
|
||||
error = (error >> scaleExp) + 1 + static_cast<int>(kUlp);
|
||||
precisionSize -= scaleExp;
|
||||
}
|
||||
|
||||
DiyFp rounded(v.f >> precisionSize, v.e + static_cast<int>(precisionSize));
|
||||
const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
|
||||
const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
|
||||
if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
|
||||
rounded.f++;
|
||||
if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
|
||||
rounded.f >>= 1;
|
||||
rounded.e++;
|
||||
}
|
||||
}
|
||||
|
||||
*result = rounded.ToDouble();
|
||||
|
||||
return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
|
||||
}
|
||||
|
||||
inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) {
|
||||
const BigInteger dInt(decimals, length);
|
||||
const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(length) + exp;
|
||||
Double a(approx);
|
||||
int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
|
||||
if (cmp < 0)
|
||||
return a.Value(); // within half ULP
|
||||
else if (cmp == 0) {
|
||||
// Round towards even
|
||||
if (a.Significand() & 1)
|
||||
return a.NextPositiveDouble();
|
||||
else
|
||||
return a.Value();
|
||||
}
|
||||
else // adjustment
|
||||
return a.NextPositiveDouble();
|
||||
}
|
||||
|
||||
inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
|
||||
RAPIDJSON_ASSERT(d >= 0.0);
|
||||
RAPIDJSON_ASSERT(length >= 1);
|
||||
|
||||
double result;
|
||||
if (StrtodFast(d, p, &result))
|
||||
return result;
|
||||
|
||||
// Trim leading zeros
|
||||
while (*decimals == '0' && length > 1) {
|
||||
length--;
|
||||
decimals++;
|
||||
decimalPosition--;
|
||||
}
|
||||
|
||||
// Trim trailing zeros
|
||||
while (decimals[length - 1] == '0' && length > 1) {
|
||||
length--;
|
||||
decimalPosition--;
|
||||
exp++;
|
||||
}
|
||||
|
||||
// Trim right-most digits
|
||||
const int kMaxDecimalDigit = 780;
|
||||
if (static_cast<int>(length) > kMaxDecimalDigit) {
|
||||
int delta = (static_cast<int>(length) - kMaxDecimalDigit);
|
||||
exp += delta;
|
||||
decimalPosition -= static_cast<unsigned>(delta);
|
||||
length = kMaxDecimalDigit;
|
||||
}
|
||||
|
||||
// If too small, underflow to zero
|
||||
if (int(length) + exp < -324)
|
||||
return 0.0;
|
||||
|
||||
if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result))
|
||||
return result;
|
||||
|
||||
// Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
|
||||
return StrtodBigInteger(result, decimals, length, decimalPosition, exp);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_STRTOD_
|
|
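// Usage sketch (not part of the committed header): StrtodFullPrecision() above is the slow
// path the parser takes when kParseFullPrecisionFlag is set and the fast path cannot
// guarantee a result within 0.5 ULP. Exercising it through the public Document API;
// document.h is presumably part of this same drop (one of the suppressed large diffs), and
// the include path is an assumption.
#include "rapidjson/document.h"
#include <cstdio>

int main() {
    using namespace rapidjson;
    const char json[] = "[0.1e-310, 1.7976931348623157e308]";   // subnormal + near-DBL_MAX
    Document d;
    d.Parse<kParseFullPrecisionFlag>(json);                     // routes doubles through StrtodFullPrecision()
    std::printf("%.17g %.17g\n", d[0u].GetDouble(), d[1u].GetDouble());
    return 0;
}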
@ -0,0 +1,46 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_INTERNAL_SWAP_H_
#define RAPIDJSON_INTERNAL_SWAP_H_

#include "../rapidjson.h"

#if defined(__clang__)
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(c++98-compat)
#endif

RAPIDJSON_NAMESPACE_BEGIN
namespace internal {

//! Custom swap() to avoid dependency on C++ <algorithm> header
/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only.
    \note This has the same semantics as std::swap().
*/
template <typename T>
inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT {
    T tmp = a;
    a = b;
    b = tmp;
}

} // namespace internal
RAPIDJSON_NAMESPACE_END

#if defined(__clang__)
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_INTERNAL_SWAP_H_
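// Usage sketch (not part of the committed header): internal::Swap() is a drop-in for
// std::swap on primitives, used internally to avoid pulling in <algorithm>. The include
// path is an assumption.
#include "rapidjson/internal/swap.h"
#include <cstdio>

int main() {
    int a = 1, b = 2;
    rapidjson::internal::Swap(a, b);   // a == 2, b == 1 afterwards
    std::printf("%d %d\n", a, b);
    return 0;
}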
@ -0,0 +1,115 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_ISTREAMWRAPPER_H_
#define RAPIDJSON_ISTREAMWRAPPER_H_

#include "stream.h"
#include <iosfwd>

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(padded)
#endif

#ifdef _MSC_VER
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized
#endif

RAPIDJSON_NAMESPACE_BEGIN

//! Wrapper of \c std::basic_istream into RapidJSON's Stream concept.
/*!
    The classes can be wrapped including but not limited to:

    - \c std::istringstream
    - \c std::stringstream
    - \c std::wistringstream
    - \c std::wstringstream
    - \c std::ifstream
    - \c std::fstream
    - \c std::wifstream
    - \c std::wfstream

    \tparam StreamType Class derived from \c std::basic_istream.
*/

template <typename StreamType>
class BasicIStreamWrapper {
public:
    typedef typename StreamType::char_type Ch;
    BasicIStreamWrapper(StreamType& stream) : stream_(stream), count_(), peekBuffer_() {}

    Ch Peek() const {
        typename StreamType::int_type c = stream_.peek();
        return RAPIDJSON_LIKELY(c != StreamType::traits_type::eof()) ? static_cast<Ch>(c) : '\0';
    }

    Ch Take() {
        typename StreamType::int_type c = stream_.get();
        if (RAPIDJSON_LIKELY(c != StreamType::traits_type::eof())) {
            count_++;
            return static_cast<Ch>(c);
        }
        else
            return '\0';
    }

    // tellg() may return -1 when failed. So we count by ourself.
    size_t Tell() const { return count_; }

    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    // For encoding detection only.
    const Ch* Peek4() const {
        RAPIDJSON_ASSERT(sizeof(Ch) == 1); // Only usable for byte stream.
        int i;
        bool hasError = false;
        for (i = 0; i < 4; ++i) {
            typename StreamType::int_type c = stream_.get();
            if (c == StreamType::traits_type::eof()) {
                hasError = true;
                stream_.clear();
                break;
            }
            peekBuffer_[i] = static_cast<Ch>(c);
        }
        for (--i; i >= 0; --i)
            stream_.putback(peekBuffer_[i]);
        return !hasError ? peekBuffer_ : 0;
    }

private:
    BasicIStreamWrapper(const BasicIStreamWrapper&);
    BasicIStreamWrapper& operator=(const BasicIStreamWrapper&);

    StreamType& stream_;
    size_t count_;  //!< Number of characters read. Note:
    mutable Ch peekBuffer_[4];
};

typedef BasicIStreamWrapper<std::istream> IStreamWrapper;
typedef BasicIStreamWrapper<std::wistream> WIStreamWrapper;

#if defined(__clang__) || defined(_MSC_VER)
RAPIDJSON_DIAG_POP
#endif

RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_ISTREAMWRAPPER_H_
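// Usage sketch (not part of the committed header): adapting a std::ifstream to the Stream
// concept so Document::ParseStream() can read straight from it. document.h is presumably
// part of this same drop; the include paths and the "config.json" file name are assumptions.
#include "rapidjson/istreamwrapper.h"
#include "rapidjson/document.h"
#include <fstream>
#include <cstdio>

int main() {
    std::ifstream ifs("config.json");            // hypothetical input file
    rapidjson::IStreamWrapper isw(ifs);
    rapidjson::Document d;
    d.ParseStream(isw);                          // pulls characters via Peek()/Take() above
    std::printf("parse error: %d\n", int(d.HasParseError()));
    return 0;
}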
@ -0,0 +1,70 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_MEMORYBUFFER_H_
#define RAPIDJSON_MEMORYBUFFER_H_

#include "stream.h"
#include "internal/stack.h"

RAPIDJSON_NAMESPACE_BEGIN

//! Represents an in-memory output byte stream.
/*!
    This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.

    It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.

    Differences between MemoryBuffer and StringBuffer:
    1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
    2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.

    \tparam Allocator type for allocating memory buffer.
    \note implements Stream concept
*/
template <typename Allocator = CrtAllocator>
struct GenericMemoryBuffer {
    typedef char Ch; // byte

    GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}

    void Put(Ch c) { *stack_.template Push<Ch>() = c; }
    void Flush() {}

    void Clear() { stack_.Clear(); }
    void ShrinkToFit() { stack_.ShrinkToFit(); }
    Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
    void Pop(size_t count) { stack_.template Pop<Ch>(count); }

    const Ch* GetBuffer() const {
        return stack_.template Bottom<Ch>();
    }

    size_t GetSize() const { return stack_.GetSize(); }

    static const size_t kDefaultCapacity = 256;
    mutable internal::Stack<Allocator> stack_;
};

typedef GenericMemoryBuffer<> MemoryBuffer;

//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
    std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
}

RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_MEMORYBUFFER_H_
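// Usage sketch (not part of the committed header): MemoryBuffer as the sink for a Writer
// when the output is treated as raw bytes rather than an encoded string. writer.h is
// assumed to be available from this same drop; the include paths are assumptions.
#include "rapidjson/memorybuffer.h"
#include "rapidjson/writer.h"
#include <cstdio>

int main() {
    rapidjson::MemoryBuffer buf;
    rapidjson::Writer<rapidjson::MemoryBuffer> writer(buf);
    writer.StartObject();
    writer.Key("ok");
    writer.Bool(true);
    writer.EndObject();
    // GetBuffer() is not null-terminated, so the length must come from GetSize().
    std::fwrite(buf.GetBuffer(), 1, buf.GetSize(), stdout);
    return 0;
}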
@ -0,0 +1,71 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_MEMORYSTREAM_H_
#define RAPIDJSON_MEMORYSTREAM_H_

#include "stream.h"

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(unreachable-code)
RAPIDJSON_DIAG_OFF(missing-noreturn)
#endif

RAPIDJSON_NAMESPACE_BEGIN

//! Represents an in-memory input byte stream.
/*!
    This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream.

    It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file.

    Differences between MemoryStream and StringStream:
    1. StringStream has encoding but MemoryStream is a byte stream.
    2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source.
    3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4().
    \note implements Stream concept
*/
struct MemoryStream {
    typedef char Ch; // byte

    MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {}

    Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; }
    Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_++; }
    size_t Tell() const { return static_cast<size_t>(src_ - begin_); }

    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    // For encoding detection only.
    const Ch* Peek4() const {
        return Tell() + 4 <= size_ ? src_ : 0;
    }

    const Ch* src_;     //!< Current read position.
    const Ch* begin_;   //!< Original head of the string.
    const Ch* end_;     //!< End of stream.
    size_t size_;       //!< Size of the stream.
};

RAPIDJSON_NAMESPACE_END

#ifdef __clang__
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_MEMORYBUFFER_H_
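// Usage sketch (not part of the committed header): parsing from a fixed-size,
// non-null-terminated byte buffer via MemoryStream. EncodedInputStream supplies the UTF-8
// interpretation that MemoryStream deliberately lacks. encodedstream.h and document.h are
// assumed to be available from this same drop; include paths are assumptions.
#include "rapidjson/memorystream.h"
#include "rapidjson/encodedstream.h"
#include "rapidjson/document.h"
#include <cstdio>

int main() {
    using namespace rapidjson;
    const char bytes[] = { '{', '"', 'n', '"', ':', '1', '}' };          // no trailing '\0'
    MemoryStream ms(bytes, sizeof(bytes));
    EncodedInputStream<UTF8<>, MemoryStream> eis(ms);                    // decode the bytes as UTF-8
    Document d;
    d.ParseStream(eis);
    std::printf("n=%d\n", d["n"].GetInt());
    return 0;
}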
@ -0,0 +1,316 @@
|
|||
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
|
||||
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
|
||||
//
|
||||
// Copyright (c) 2006-2013 Alexander Chemeris
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the product nor the names of its contributors may
|
||||
// be used to endorse or promote products derived from this software
|
||||
// without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// The above software in this distribution may have been modified by
|
||||
// THL A29 Limited ("Tencent Modifications").
|
||||
// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
|
||||
|
||||
#ifndef _MSC_VER // [
|
||||
#error "Use this header only with Microsoft Visual C++ compilers!"
|
||||
#endif // _MSC_VER ]
|
||||
|
||||
#ifndef _MSC_INTTYPES_H_ // [
|
||||
#define _MSC_INTTYPES_H_
|
||||
|
||||
#if _MSC_VER > 1000
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#include "stdint.h"
|
||||
|
||||
// miloyip: VC supports inttypes.h since VC2013
|
||||
#if _MSC_VER >= 1800
|
||||
#include <inttypes.h>
|
||||
#else
|
||||
|
||||
// 7.8 Format conversion of integer types
|
||||
|
||||
typedef struct {
|
||||
intmax_t quot;
|
||||
intmax_t rem;
|
||||
} imaxdiv_t;
|
||||
|
||||
// 7.8.1 Macros for format specifiers
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
|
||||
|
||||
// The fprintf macros for signed integers are:
|
||||
#define PRId8 "d"
|
||||
#define PRIi8 "i"
|
||||
#define PRIdLEAST8 "d"
|
||||
#define PRIiLEAST8 "i"
|
||||
#define PRIdFAST8 "d"
|
||||
#define PRIiFAST8 "i"
|
||||
|
||||
#define PRId16 "hd"
|
||||
#define PRIi16 "hi"
|
||||
#define PRIdLEAST16 "hd"
|
||||
#define PRIiLEAST16 "hi"
|
||||
#define PRIdFAST16 "hd"
|
||||
#define PRIiFAST16 "hi"
|
||||
|
||||
#define PRId32 "I32d"
|
||||
#define PRIi32 "I32i"
|
||||
#define PRIdLEAST32 "I32d"
|
||||
#define PRIiLEAST32 "I32i"
|
||||
#define PRIdFAST32 "I32d"
|
||||
#define PRIiFAST32 "I32i"
|
||||
|
||||
#define PRId64 "I64d"
|
||||
#define PRIi64 "I64i"
|
||||
#define PRIdLEAST64 "I64d"
|
||||
#define PRIiLEAST64 "I64i"
|
||||
#define PRIdFAST64 "I64d"
|
||||
#define PRIiFAST64 "I64i"
|
||||
|
||||
#define PRIdMAX "I64d"
|
||||
#define PRIiMAX "I64i"
|
||||
|
||||
#define PRIdPTR "Id"
|
||||
#define PRIiPTR "Ii"
|
||||
|
||||
// The fprintf macros for unsigned integers are:
|
||||
#define PRIo8 "o"
|
||||
#define PRIu8 "u"
|
||||
#define PRIx8 "x"
|
||||
#define PRIX8 "X"
|
||||
#define PRIoLEAST8 "o"
|
||||
#define PRIuLEAST8 "u"
|
||||
#define PRIxLEAST8 "x"
|
||||
#define PRIXLEAST8 "X"
|
||||
#define PRIoFAST8 "o"
|
||||
#define PRIuFAST8 "u"
|
||||
#define PRIxFAST8 "x"
|
||||
#define PRIXFAST8 "X"
|
||||
|
||||
#define PRIo16 "ho"
|
||||
#define PRIu16 "hu"
|
||||
#define PRIx16 "hx"
|
||||
#define PRIX16 "hX"
|
||||
#define PRIoLEAST16 "ho"
|
||||
#define PRIuLEAST16 "hu"
|
||||
#define PRIxLEAST16 "hx"
|
||||
#define PRIXLEAST16 "hX"
|
||||
#define PRIoFAST16 "ho"
|
||||
#define PRIuFAST16 "hu"
|
||||
#define PRIxFAST16 "hx"
|
||||
#define PRIXFAST16 "hX"
|
||||
|
||||
#define PRIo32 "I32o"
|
||||
#define PRIu32 "I32u"
|
||||
#define PRIx32 "I32x"
|
||||
#define PRIX32 "I32X"
|
||||
#define PRIoLEAST32 "I32o"
|
||||
#define PRIuLEAST32 "I32u"
|
||||
#define PRIxLEAST32 "I32x"
|
||||
#define PRIXLEAST32 "I32X"
|
||||
#define PRIoFAST32 "I32o"
|
||||
#define PRIuFAST32 "I32u"
|
||||
#define PRIxFAST32 "I32x"
|
||||
#define PRIXFAST32 "I32X"
|
||||
|
||||
#define PRIo64 "I64o"
|
||||
#define PRIu64 "I64u"
|
||||
#define PRIx64 "I64x"
|
||||
#define PRIX64 "I64X"
|
||||
#define PRIoLEAST64 "I64o"
|
||||
#define PRIuLEAST64 "I64u"
|
||||
#define PRIxLEAST64 "I64x"
|
||||
#define PRIXLEAST64 "I64X"
|
||||
#define PRIoFAST64 "I64o"
|
||||
#define PRIuFAST64 "I64u"
|
||||
#define PRIxFAST64 "I64x"
|
||||
#define PRIXFAST64 "I64X"
|
||||
|
||||
#define PRIoMAX "I64o"
|
||||
#define PRIuMAX "I64u"
|
||||
#define PRIxMAX "I64x"
|
||||
#define PRIXMAX "I64X"
|
||||
|
||||
#define PRIoPTR "Io"
|
||||
#define PRIuPTR "Iu"
|
||||
#define PRIxPTR "Ix"
|
||||
#define PRIXPTR "IX"
|
||||
|
||||
// The fscanf macros for signed integers are:
|
||||
#define SCNd8 "d"
|
||||
#define SCNi8 "i"
|
||||
#define SCNdLEAST8 "d"
|
||||
#define SCNiLEAST8 "i"
|
||||
#define SCNdFAST8 "d"
|
||||
#define SCNiFAST8 "i"
|
||||
|
||||
#define SCNd16 "hd"
|
||||
#define SCNi16 "hi"
|
||||
#define SCNdLEAST16 "hd"
|
||||
#define SCNiLEAST16 "hi"
|
||||
#define SCNdFAST16 "hd"
|
||||
#define SCNiFAST16 "hi"
|
||||
|
||||
#define SCNd32 "ld"
|
||||
#define SCNi32 "li"
|
||||
#define SCNdLEAST32 "ld"
|
||||
#define SCNiLEAST32 "li"
|
||||
#define SCNdFAST32 "ld"
|
||||
#define SCNiFAST32 "li"
|
||||
|
||||
#define SCNd64 "I64d"
|
||||
#define SCNi64 "I64i"
|
||||
#define SCNdLEAST64 "I64d"
|
||||
#define SCNiLEAST64 "I64i"
|
||||
#define SCNdFAST64 "I64d"
|
||||
#define SCNiFAST64 "I64i"
|
||||
|
||||
#define SCNdMAX "I64d"
|
||||
#define SCNiMAX "I64i"
|
||||
|
||||
#ifdef _WIN64 // [
|
||||
# define SCNdPTR "I64d"
|
||||
# define SCNiPTR "I64i"
|
||||
#else // _WIN64 ][
|
||||
# define SCNdPTR "ld"
|
||||
# define SCNiPTR "li"
|
||||
#endif // _WIN64 ]
|
||||
|
||||
// The fscanf macros for unsigned integers are:
|
||||
#define SCNo8 "o"
|
||||
#define SCNu8 "u"
|
||||
#define SCNx8 "x"
|
||||
#define SCNX8 "X"
|
||||
#define SCNoLEAST8 "o"
|
||||
#define SCNuLEAST8 "u"
|
||||
#define SCNxLEAST8 "x"
|
||||
#define SCNXLEAST8 "X"
|
||||
#define SCNoFAST8 "o"
|
||||
#define SCNuFAST8 "u"
|
||||
#define SCNxFAST8 "x"
|
||||
#define SCNXFAST8 "X"
|
||||
|
||||
#define SCNo16 "ho"
|
||||
#define SCNu16 "hu"
|
||||
#define SCNx16 "hx"
|
||||
#define SCNX16 "hX"
|
||||
#define SCNoLEAST16 "ho"
|
||||
#define SCNuLEAST16 "hu"
|
||||
#define SCNxLEAST16 "hx"
|
||||
#define SCNXLEAST16 "hX"
|
||||
#define SCNoFAST16 "ho"
|
||||
#define SCNuFAST16 "hu"
|
||||
#define SCNxFAST16 "hx"
|
||||
#define SCNXFAST16 "hX"
|
||||
|
||||
#define SCNo32 "lo"
|
||||
#define SCNu32 "lu"
|
||||
#define SCNx32 "lx"
|
||||
#define SCNX32 "lX"
|
||||
#define SCNoLEAST32 "lo"
|
||||
#define SCNuLEAST32 "lu"
|
||||
#define SCNxLEAST32 "lx"
|
||||
#define SCNXLEAST32 "lX"
|
||||
#define SCNoFAST32 "lo"
|
||||
#define SCNuFAST32 "lu"
|
||||
#define SCNxFAST32 "lx"
|
||||
#define SCNXFAST32 "lX"
|
||||
|
||||
#define SCNo64 "I64o"
|
||||
#define SCNu64 "I64u"
|
||||
#define SCNx64 "I64x"
|
||||
#define SCNX64 "I64X"
|
||||
#define SCNoLEAST64 "I64o"
|
||||
#define SCNuLEAST64 "I64u"
|
||||
#define SCNxLEAST64 "I64x"
|
||||
#define SCNXLEAST64 "I64X"
|
||||
#define SCNoFAST64 "I64o"
|
||||
#define SCNuFAST64 "I64u"
|
||||
#define SCNxFAST64 "I64x"
|
||||
#define SCNXFAST64 "I64X"
|
||||
|
||||
#define SCNoMAX "I64o"
|
||||
#define SCNuMAX "I64u"
|
||||
#define SCNxMAX "I64x"
|
||||
#define SCNXMAX "I64X"
|
||||
|
||||
#ifdef _WIN64 // [
|
||||
# define SCNoPTR "I64o"
|
||||
# define SCNuPTR "I64u"
|
||||
# define SCNxPTR "I64x"
|
||||
# define SCNXPTR "I64X"
|
||||
#else // _WIN64 ][
|
||||
# define SCNoPTR "lo"
|
||||
# define SCNuPTR "lu"
|
||||
# define SCNxPTR "lx"
|
||||
# define SCNXPTR "lX"
|
||||
#endif // _WIN64 ]
|
||||
|
||||
#endif // __STDC_FORMAT_MACROS ]
|
||||
|
||||
// 7.8.2 Functions for greatest-width integer types
|
||||
|
||||
// 7.8.2.1 The imaxabs function
|
||||
#define imaxabs _abs64
|
||||
|
||||
// 7.8.2.2 The imaxdiv function
|
||||
|
||||
// This is modified version of div() function from Microsoft's div.c found
|
||||
// in %MSVC.NET%\crt\src\div.c
|
||||
#ifdef STATIC_IMAXDIV // [
|
||||
static
|
||||
#else // STATIC_IMAXDIV ][
|
||||
_inline
|
||||
#endif // STATIC_IMAXDIV ]
|
||||
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
|
||||
{
|
||||
imaxdiv_t result;
|
||||
|
||||
result.quot = numer / denom;
|
||||
result.rem = numer % denom;
|
||||
|
||||
if (numer < 0 && result.rem > 0) {
|
||||
// did division wrong; must fix up
|
||||
++result.quot;
|
||||
result.rem -= denom;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// 7.8.2.3 The strtoimax and strtoumax functions
|
||||
#define strtoimax _strtoi64
|
||||
#define strtoumax _strtoui64
|
||||
|
||||
// 7.8.2.4 The wcstoimax and wcstoumax functions
|
||||
#define wcstoimax _wcstoi64
|
||||
#define wcstoumax _wcstoui64
|
||||
|
||||
#endif // _MSC_VER >= 1800
|
||||
|
||||
#endif // _MSC_INTTYPES_H_ ]
|
|
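// Usage sketch (not part of the committed header): the PRI* format macros defined above
// give 64-bit values a portable printf format on pre-2013 MSVC, matching <inttypes.h>
// elsewhere. Note the header only exposes them to C++ when __STDC_FORMAT_MACROS is defined
// (see the #if guard above).
#define __STDC_FORMAT_MACROS
#include <inttypes.h>   // resolves to the msinttypes replacement header on old MSVC
#include <cstdio>

int main() {
    int64_t v = -1234567890123456789LL;
    uint64_t u = 9876543210987654321ULL;
    std::printf("v=%" PRId64 " u=%" PRIu64 "\n", v, u);
    return 0;
}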
@ -0,0 +1,300 @@
|
|||
// ISO C9x compliant stdint.h for Microsoft Visual Studio
|
||||
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
|
||||
//
|
||||
// Copyright (c) 2006-2013 Alexander Chemeris
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the product nor the names of its contributors may
|
||||
// be used to endorse or promote products derived from this software
|
||||
// without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// The above software in this distribution may have been modified by
|
||||
// THL A29 Limited ("Tencent Modifications").
|
||||
// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
|
||||
|
||||
#ifndef _MSC_VER // [
|
||||
#error "Use this header only with Microsoft Visual C++ compilers!"
|
||||
#endif // _MSC_VER ]
|
||||
|
||||
#ifndef _MSC_STDINT_H_ // [
|
||||
#define _MSC_STDINT_H_
|
||||
|
||||
#if _MSC_VER > 1000
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010.
|
||||
#if _MSC_VER >= 1600 // [
|
||||
#include <stdint.h>
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
|
||||
|
||||
#undef INT8_C
|
||||
#undef INT16_C
|
||||
#undef INT32_C
|
||||
#undef INT64_C
|
||||
#undef UINT8_C
|
||||
#undef UINT16_C
|
||||
#undef UINT32_C
|
||||
#undef UINT64_C
|
||||
|
||||
// 7.18.4.1 Macros for minimum-width integer constants
|
||||
|
||||
#define INT8_C(val) val##i8
|
||||
#define INT16_C(val) val##i16
|
||||
#define INT32_C(val) val##i32
|
||||
#define INT64_C(val) val##i64
|
||||
|
||||
#define UINT8_C(val) val##ui8
|
||||
#define UINT16_C(val) val##ui16
|
||||
#define UINT32_C(val) val##ui32
|
||||
#define UINT64_C(val) val##ui64
|
||||
|
||||
// 7.18.4.2 Macros for greatest-width integer constants
|
||||
// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
|
||||
// Check out Issue 9 for the details.
|
||||
#ifndef INTMAX_C // [
|
||||
# define INTMAX_C INT64_C
|
||||
#endif // INTMAX_C ]
|
||||
#ifndef UINTMAX_C // [
|
||||
# define UINTMAX_C UINT64_C
|
||||
#endif // UINTMAX_C ]
|
||||
|
||||
#endif // __STDC_CONSTANT_MACROS ]
|
||||
|
||||
#else // ] _MSC_VER >= 1700 [
|
||||
|
||||
#include <limits.h>
|
||||
|
||||
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
|
||||
// compiling for ARM we have to wrap <wchar.h> include with 'extern "C++" {}'
|
||||
// or compiler would give many errors like this:
|
||||
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
|
||||
#if defined(__cplusplus) && !defined(_M_ARM)
|
||||
extern "C" {
|
||||
#endif
|
||||
# include <wchar.h>
|
||||
#if defined(__cplusplus) && !defined(_M_ARM)
|
||||
}
|
||||
#endif
|
||||
|
||||
// Define _W64 macros to mark types changing their size, like intptr_t.
|
||||
#ifndef _W64
|
||||
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
|
||||
# define _W64 __w64
|
||||
# else
|
||||
# define _W64
|
||||
# endif
|
||||
#endif
|
||||
|
||||
|
||||
// 7.18.1 Integer types
|
||||
|
||||
// 7.18.1.1 Exact-width integer types
|
||||
|
||||
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
|
||||
// realize that, e.g. char has the same size as __int8
|
||||
// so we give up on __intX for them.
|
||||
#if (_MSC_VER < 1300)
|
||||
typedef signed char int8_t;
|
||||
typedef signed short int16_t;
|
||||
typedef signed int int32_t;
|
||||
typedef unsigned char uint8_t;
|
||||
typedef unsigned short uint16_t;
|
||||
typedef unsigned int uint32_t;
|
||||
#else
|
||||
typedef signed __int8 int8_t;
|
||||
typedef signed __int16 int16_t;
|
||||
typedef signed __int32 int32_t;
|
||||
typedef unsigned __int8 uint8_t;
|
||||
typedef unsigned __int16 uint16_t;
|
||||
typedef unsigned __int32 uint32_t;
|
||||
#endif
|
||||
typedef signed __int64 int64_t;
|
||||
typedef unsigned __int64 uint64_t;
|
||||
|
||||
|
||||
// 7.18.1.2 Minimum-width integer types
|
||||
typedef int8_t int_least8_t;
|
||||
typedef int16_t int_least16_t;
|
||||
typedef int32_t int_least32_t;
|
||||
typedef int64_t int_least64_t;
|
||||
typedef uint8_t uint_least8_t;
|
||||
typedef uint16_t uint_least16_t;
|
||||
typedef uint32_t uint_least32_t;
|
||||
typedef uint64_t uint_least64_t;
|
||||
|
||||
// 7.18.1.3 Fastest minimum-width integer types
|
||||
typedef int8_t int_fast8_t;
|
||||
typedef int16_t int_fast16_t;
|
||||
typedef int32_t int_fast32_t;
|
||||
typedef int64_t int_fast64_t;
|
||||
typedef uint8_t uint_fast8_t;
|
||||
typedef uint16_t uint_fast16_t;
|
||||
typedef uint32_t uint_fast32_t;
|
||||
typedef uint64_t uint_fast64_t;
|
||||
|
||||
// 7.18.1.4 Integer types capable of holding object pointers
|
||||
#ifdef _WIN64 // [
|
||||
typedef signed __int64 intptr_t;
|
||||
typedef unsigned __int64 uintptr_t;
|
||||
#else // _WIN64 ][
|
||||
typedef _W64 signed int intptr_t;
|
||||
typedef _W64 unsigned int uintptr_t;
|
||||
#endif // _WIN64 ]
|
||||
|
||||
// 7.18.1.5 Greatest-width integer types
|
||||
typedef int64_t intmax_t;
|
||||
typedef uint64_t uintmax_t;
|
||||
|
||||
|
||||
// 7.18.2 Limits of specified-width integer types
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
|
||||
|
||||
// 7.18.2.1 Limits of exact-width integer types
|
||||
#define INT8_MIN ((int8_t)_I8_MIN)
|
||||
#define INT8_MAX _I8_MAX
|
||||
#define INT16_MIN ((int16_t)_I16_MIN)
|
||||
#define INT16_MAX _I16_MAX
|
||||
#define INT32_MIN ((int32_t)_I32_MIN)
|
||||
#define INT32_MAX _I32_MAX
|
||||
#define INT64_MIN ((int64_t)_I64_MIN)
|
||||
#define INT64_MAX _I64_MAX
|
||||
#define UINT8_MAX _UI8_MAX
|
||||
#define UINT16_MAX _UI16_MAX
|
||||
#define UINT32_MAX _UI32_MAX
|
||||
#define UINT64_MAX _UI64_MAX
|
||||
|
||||
// 7.18.2.2 Limits of minimum-width integer types
|
||||
#define INT_LEAST8_MIN INT8_MIN
|
||||
#define INT_LEAST8_MAX INT8_MAX
|
||||
#define INT_LEAST16_MIN INT16_MIN
|
||||
#define INT_LEAST16_MAX INT16_MAX
|
||||
#define INT_LEAST32_MIN INT32_MIN
|
||||
#define INT_LEAST32_MAX INT32_MAX
|
||||
#define INT_LEAST64_MIN INT64_MIN
|
||||
#define INT_LEAST64_MAX INT64_MAX
|
||||
#define UINT_LEAST8_MAX UINT8_MAX
|
||||
#define UINT_LEAST16_MAX UINT16_MAX
|
||||
#define UINT_LEAST32_MAX UINT32_MAX
|
||||
#define UINT_LEAST64_MAX UINT64_MAX
|
||||
|
||||
// 7.18.2.3 Limits of fastest minimum-width integer types
|
||||
#define INT_FAST8_MIN INT8_MIN
|
||||
#define INT_FAST8_MAX INT8_MAX
|
||||
#define INT_FAST16_MIN INT16_MIN
|
||||
#define INT_FAST16_MAX INT16_MAX
|
||||
#define INT_FAST32_MIN INT32_MIN
|
||||
#define INT_FAST32_MAX INT32_MAX
|
||||
#define INT_FAST64_MIN INT64_MIN
|
||||
#define INT_FAST64_MAX INT64_MAX
|
||||
#define UINT_FAST8_MAX UINT8_MAX
|
||||
#define UINT_FAST16_MAX UINT16_MAX
|
||||
#define UINT_FAST32_MAX UINT32_MAX
|
||||
#define UINT_FAST64_MAX UINT64_MAX
|
||||
|
||||
// 7.18.2.4 Limits of integer types capable of holding object pointers
|
||||
#ifdef _WIN64 // [
|
||||
# define INTPTR_MIN INT64_MIN
|
||||
# define INTPTR_MAX INT64_MAX
|
||||
# define UINTPTR_MAX UINT64_MAX
|
||||
#else // _WIN64 ][
|
||||
# define INTPTR_MIN INT32_MIN
|
||||
# define INTPTR_MAX INT32_MAX
|
||||
# define UINTPTR_MAX UINT32_MAX
|
||||
#endif // _WIN64 ]
|
||||
|
||||
// 7.18.2.5 Limits of greatest-width integer types
|
||||
#define INTMAX_MIN INT64_MIN
|
||||
#define INTMAX_MAX INT64_MAX
|
||||
#define UINTMAX_MAX UINT64_MAX
|
||||
|
||||
// 7.18.3 Limits of other integer types
|
||||
|
||||
#ifdef _WIN64 // [
|
||||
# define PTRDIFF_MIN _I64_MIN
|
||||
# define PTRDIFF_MAX _I64_MAX
|
||||
#else // _WIN64 ][
|
||||
# define PTRDIFF_MIN _I32_MIN
|
||||
# define PTRDIFF_MAX _I32_MAX
|
||||
#endif // _WIN64 ]
|
||||
|
||||
#define SIG_ATOMIC_MIN INT_MIN
|
||||
#define SIG_ATOMIC_MAX INT_MAX
|
||||
|
||||
#ifndef SIZE_MAX // [
|
||||
# ifdef _WIN64 // [
|
||||
# define SIZE_MAX _UI64_MAX
|
||||
# else // _WIN64 ][
|
||||
# define SIZE_MAX _UI32_MAX
|
||||
# endif // _WIN64 ]
|
||||
#endif // SIZE_MAX ]
|
||||
|
||||
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
|
||||
#ifndef WCHAR_MIN // [
|
||||
# define WCHAR_MIN 0
|
||||
#endif // WCHAR_MIN ]
|
||||
#ifndef WCHAR_MAX // [
|
||||
# define WCHAR_MAX _UI16_MAX
|
||||
#endif // WCHAR_MAX ]
|
||||
|
||||
#define WINT_MIN 0
|
||||
#define WINT_MAX _UI16_MAX
|
||||
|
||||
#endif // __STDC_LIMIT_MACROS ]
|
||||
|
||||
|
||||
// 7.18.4 Limits of other integer types
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
|
||||
|
||||
// 7.18.4.1 Macros for minimum-width integer constants
|
||||
|
||||
#define INT8_C(val) val##i8
|
||||
#define INT16_C(val) val##i16
|
||||
#define INT32_C(val) val##i32
|
||||
#define INT64_C(val) val##i64
|
||||
|
||||
#define UINT8_C(val) val##ui8
|
||||
#define UINT16_C(val) val##ui16
|
||||
#define UINT32_C(val) val##ui32
|
||||
#define UINT64_C(val) val##ui64
|
||||
|
||||
// 7.18.4.2 Macros for greatest-width integer constants
|
||||
// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
|
||||
// Check out Issue 9 for the details.
|
||||
#ifndef INTMAX_C // [
|
||||
# define INTMAX_C INT64_C
|
||||
#endif // INTMAX_C ]
|
||||
#ifndef UINTMAX_C // [
|
||||
# define UINTMAX_C UINT64_C
|
||||
#endif // UINTMAX_C ]
|
||||
|
||||
#endif // __STDC_CONSTANT_MACROS ]
|
||||
|
||||
#endif // _MSC_VER >= 1600 ]
|
||||
|
||||
#endif // _MSC_STDINT_H_ ]
|
|
@ -0,0 +1,81 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_OSTREAMWRAPPER_H_
#define RAPIDJSON_OSTREAMWRAPPER_H_

#include "stream.h"
#include <iosfwd>

#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(padded)
#endif

RAPIDJSON_NAMESPACE_BEGIN

//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept.
/*!
    The classes can be wrapped including but not limited to:

    - \c std::ostringstream
    - \c std::stringstream
    - \c std::wpstringstream
    - \c std::wstringstream
    - \c std::ifstream
    - \c std::fstream
    - \c std::wofstream
    - \c std::wfstream

    \tparam StreamType Class derived from \c std::basic_ostream.
*/

template <typename StreamType>
class BasicOStreamWrapper {
public:
    typedef typename StreamType::char_type Ch;
    BasicOStreamWrapper(StreamType& stream) : stream_(stream) {}

    void Put(Ch c) {
        stream_.put(c);
    }

    void Flush() {
        stream_.flush();
    }

    // Not implemented
    char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
    char Take() { RAPIDJSON_ASSERT(false); return 0; }
    size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
    char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }

private:
    BasicOStreamWrapper(const BasicOStreamWrapper&);
    BasicOStreamWrapper& operator=(const BasicOStreamWrapper&);

    StreamType& stream_;
};

typedef BasicOStreamWrapper<std::ostream> OStreamWrapper;
typedef BasicOStreamWrapper<std::wostream> WOStreamWrapper;

#ifdef __clang__
RAPIDJSON_DIAG_POP
#endif

RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_OSTREAMWRAPPER_H_
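// Usage sketch (not part of the committed header): streaming Writer output straight into
// any std::ostream through OStreamWrapper. writer.h is assumed to be available from this
// same drop; the include paths are assumptions.
#include "rapidjson/ostreamwrapper.h"
#include "rapidjson/writer.h"
#include <iostream>

int main() {
    rapidjson::OStreamWrapper osw(std::cout);
    rapidjson::Writer<rapidjson::OStreamWrapper> writer(osw);
    writer.StartObject();
    writer.Key("backup");
    writer.String("running");
    writer.EndObject();                          // the wrapper is flushed once the root value ends
    std::cout << std::endl;
    return 0;
}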
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,261 @@
|
|||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_PRETTYWRITER_H_
|
||||
#define RAPIDJSON_PRETTYWRITER_H_
|
||||
|
||||
#include "writer.h"
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Combination of PrettyWriter format flags.
|
||||
/*! \see PrettyWriter::SetFormatOptions
|
||||
*/
|
||||
enum PrettyFormatOptions {
|
||||
kFormatDefault = 0, //!< Default pretty formatting.
|
||||
kFormatSingleLineArray = 1 //!< Format arrays on a single line.
|
||||
};
|
||||
|
||||
//! Writer with indentation and spacing.
|
||||
/*!
|
||||
\tparam OutputStream Type of ouptut os.
|
||||
\tparam SourceEncoding Encoding of source string.
|
||||
\tparam TargetEncoding Encoding of output stream.
|
||||
\tparam StackAllocator Type of allocator for allocating memory of stack.
|
||||
*/
|
||||
template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
|
||||
class PrettyWriter : public Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator, writeFlags> {
|
||||
public:
|
||||
typedef Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator> Base;
|
||||
typedef typename Base::Ch Ch;
|
||||
|
||||
//! Constructor
|
||||
/*! \param os Output stream.
|
||||
\param allocator User supplied allocator. If it is null, it will create a private one.
|
||||
\param levelDepth Initial capacity of stack.
|
||||
*/
|
||||
explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
|
||||
Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {}
|
||||
|
||||
|
||||
explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
|
||||
Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {}
|
||||
|
||||
//! Set custom indentation.
|
||||
/*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r').
|
||||
\param indentCharCount Number of indent characters for each indentation level.
|
||||
\note The default indentation is 4 spaces.
|
||||
*/
|
||||
PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) {
|
||||
RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r');
|
||||
indentChar_ = indentChar;
|
||||
indentCharCount_ = indentCharCount;
|
||||
return *this;
|
||||
}
|
||||
|
||||
//! Set pretty writer formatting options.
|
||||
/*! \param options Formatting options.
|
||||
*/
|
||||
PrettyWriter& SetFormatOptions(PrettyFormatOptions options) {
|
||||
formatOptions_ = options;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/*! @name Implementation of Handler
|
||||
\see Handler
|
||||
*/
|
||||
//@{
|
||||
|
||||
bool Null() { PrettyPrefix(kNullType); return Base::WriteNull(); }
|
||||
bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::WriteBool(b); }
|
||||
bool Int(int i) { PrettyPrefix(kNumberType); return Base::WriteInt(i); }
|
||||
bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::WriteUint(u); }
|
||||
bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); }
|
||||
bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::WriteUint64(u64); }
|
||||
bool Double(double d) { PrettyPrefix(kNumberType); return Base::WriteDouble(d); }
|
||||
|
||||
bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
|
||||
RAPIDJSON_ASSERT(str != 0);
|
||||
(void)copy;
|
||||
PrettyPrefix(kNumberType);
|
||||
return Base::WriteString(str, length);
|
||||
}
|
||||
|
||||
bool String(const Ch* str, SizeType length, bool copy = false) {
|
||||
RAPIDJSON_ASSERT(str != 0);
|
||||
(void)copy;
|
||||
PrettyPrefix(kStringType);
|
||||
return Base::WriteString(str, length);
|
||||
}
|
||||
|
||||
#if RAPIDJSON_HAS_STDSTRING
|
||||
bool String(const std::basic_string<Ch>& str) {
|
||||
return String(str.data(), SizeType(str.size()));
|
||||
}
|
||||
#endif
|
||||
|
||||
bool StartObject() {
|
||||
PrettyPrefix(kObjectType);
|
||||
new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(false);
|
||||
return Base::WriteStartObject();
|
||||
}
|
||||
|
||||
bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }
|
||||
|
||||
#if RAPIDJSON_HAS_STDSTRING
|
||||
bool Key(const std::basic_string<Ch>& str) {
|
||||
return Key(str.data(), SizeType(str.size()));
|
||||
}
|
||||
#endif
|
||||
|
||||
bool EndObject(SizeType memberCount = 0) {
|
||||
(void)memberCount;
|
||||
RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
|
||||
RAPIDJSON_ASSERT(!Base::level_stack_.template Top<typename Base::Level>()->inArray);
|
||||
bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
|
||||
|
||||
if (!empty) {
|
||||
Base::os_->Put('\n');
|
||||
WriteIndent();
|
||||
}
|
||||
bool ret = Base::WriteEndObject();
|
||||
(void)ret;
|
||||
RAPIDJSON_ASSERT(ret == true);
|
||||
if (Base::level_stack_.Empty()) // end of json text
|
||||
Base::os_->Flush();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StartArray() {
|
||||
PrettyPrefix(kArrayType);
|
||||
new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(true);
|
||||
return Base::WriteStartArray();
|
||||
}
|
||||
|
||||
bool EndArray(SizeType memberCount = 0) {
|
||||
(void)memberCount;
|
||||
RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
|
||||
RAPIDJSON_ASSERT(Base::level_stack_.template Top<typename Base::Level>()->inArray);
|
||||
bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
|
||||
|
||||
if (!empty && !(formatOptions_ & kFormatSingleLineArray)) {
|
||||
Base::os_->Put('\n');
|
||||
WriteIndent();
|
||||
}
|
||||
bool ret = Base::WriteEndArray();
|
||||
(void)ret;
|
||||
RAPIDJSON_ASSERT(ret == true);
|
||||
if (Base::level_stack_.Empty()) // end of json text
|
||||
Base::os_->Flush();
|
||||
return true;
|
||||
}
|
||||
|
||||
//@}
|
||||
|
||||
/*! @name Convenience extensions */
|
||||
//@{
|
||||
|
||||
//! Simpler but slower overload.
|
||||
bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
|
||||
bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }
|
||||
|
||||
//@}
|
||||
|
||||
//! Write a raw JSON value.
|
||||
/*!
|
||||
For user to write a stringified JSON as a value.
|
||||
|
||||
\param json A well-formed JSON value. It should not contain null character within [0, length - 1] range.
|
||||
\param length Length of the json.
|
||||
\param type Type of the root of json.
|
||||
\note When using PrettyWriter::RawValue(), the result json may not be indented correctly.
|
||||
*/
|
||||
bool RawValue(const Ch* json, size_t length, Type type) {
|
||||
RAPIDJSON_ASSERT(json != 0);
|
||||
PrettyPrefix(type);
|
||||
return Base::WriteRawValue(json, length);
|
||||
}
|
||||
|
||||
protected:
|
||||
void PrettyPrefix(Type type) {
|
||||
(void)type;
|
||||
if (Base::level_stack_.GetSize() != 0) { // this value is not at root
|
||||
typename Base::Level* level = Base::level_stack_.template Top<typename Base::Level>();
|
||||
|
||||
if (level->inArray) {
|
||||
if (level->valueCount > 0) {
|
||||
Base::os_->Put(','); // add comma if it is not the first element in array
|
||||
if (formatOptions_ & kFormatSingleLineArray)
|
||||
Base::os_->Put(' ');
|
||||
}
|
||||
|
||||
if (!(formatOptions_ & kFormatSingleLineArray)) {
|
||||
Base::os_->Put('\n');
|
||||
WriteIndent();
|
||||
}
|
||||
}
|
||||
else { // in object
|
||||
if (level->valueCount > 0) {
|
||||
if (level->valueCount % 2 == 0) {
|
||||
Base::os_->Put(',');
|
||||
Base::os_->Put('\n');
|
||||
}
|
||||
else {
|
||||
Base::os_->Put(':');
|
||||
Base::os_->Put(' ');
|
||||
}
|
||||
}
|
||||
else
|
||||
Base::os_->Put('\n');
|
||||
|
||||
if (level->valueCount % 2 == 0)
|
||||
WriteIndent();
|
||||
}
|
||||
if (!level->inArray && level->valueCount % 2 == 0)
|
||||
RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name
|
||||
level->valueCount++;
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(!Base::hasRoot_); // Should only has one and only one root.
|
||||
Base::hasRoot_ = true;
|
||||
}
|
||||
}
|
||||
|
||||
void WriteIndent() {
|
||||
size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_;
|
||||
PutN(*Base::os_, static_cast<typename TargetEncoding::Ch>(indentChar_), count);
|
||||
}
|
||||
|
||||
Ch indentChar_;
|
||||
unsigned indentCharCount_;
|
||||
PrettyFormatOptions formatOptions_;
|
||||
|
||||
private:
|
||||
// Prohibit copy constructor & assignment operator.
|
||||
PrettyWriter(const PrettyWriter&);
|
||||
PrettyWriter& operator=(const PrettyWriter&);
|
||||
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_RAPIDJSON_H_
|
|
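// Usage sketch (not part of the committed header): the PrettyWriter above layered on
// StringBuffer, with two-space indentation and single-line arrays. stringbuffer.h is
// assumed to be available from this same drop; the include paths are assumptions.
#include "rapidjson/prettywriter.h"
#include "rapidjson/stringbuffer.h"
#include <cstdio>

int main() {
    rapidjson::StringBuffer sb;
    rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(sb);
    writer.SetIndent(' ', 2);                          // two spaces per indentation level
    writer.SetFormatOptions(rapidjson::kFormatSingleLineArray);
    writer.StartObject();
    writer.Key("ranges");
    writer.StartArray();
    writer.String("a");
    writer.String("b");
    writer.EndArray();
    writer.EndObject();
    std::puts(sb.GetString());                         // pretty-printed JSON text
    return 0;
}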
@ -0,0 +1,615 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_RAPIDJSON_H_
#define RAPIDJSON_RAPIDJSON_H_

/*!\file rapidjson.h
    \brief common definitions and configuration

    \see RAPIDJSON_CONFIG
*/

/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration
    \brief Configuration macros for library features

    Some RapidJSON features are configurable to adapt the library to a wide
    variety of platforms, environments and usage scenarios. Most of the
    features can be configured in terms of overriden or predefined
    preprocessor macros at compile-time.

    Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs.

    \note These macros should be given on the compiler command-line
    (where applicable) to avoid inconsistent values when compiling
    different translation units of a single application.
*/

#include <cstdlib> // malloc(), realloc(), free(), size_t
#include <cstring> // memset(), memcpy(), memmove(), memcmp()

///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_VERSION_STRING
//
// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt.
//

//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
// token stringification
#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x)
#define RAPIDJSON_DO_STRINGIFY(x) #x
//!@endcond

/*! \def RAPIDJSON_MAJOR_VERSION
    \ingroup RAPIDJSON_CONFIG
    \brief Major version of RapidJSON in integer.
*/
/*! \def RAPIDJSON_MINOR_VERSION
    \ingroup RAPIDJSON_CONFIG
    \brief Minor version of RapidJSON in integer.
*/
/*! \def RAPIDJSON_PATCH_VERSION
    \ingroup RAPIDJSON_CONFIG
    \brief Patch version of RapidJSON in integer.
*/
/*! \def RAPIDJSON_VERSION_STRING
    \ingroup RAPIDJSON_CONFIG
    \brief Version of RapidJSON in "<major>.<minor>.<patch>" string format.
*/
#define RAPIDJSON_MAJOR_VERSION 1
#define RAPIDJSON_MINOR_VERSION 1
#define RAPIDJSON_PATCH_VERSION 0
#define RAPIDJSON_VERSION_STRING \
    RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION)

///////////////////////////////////////////////////////////////////////////////
// RAPIDJSON_NAMESPACE_(BEGIN|END)
/*! \def RAPIDJSON_NAMESPACE
    \ingroup RAPIDJSON_CONFIG
    \brief provide custom rapidjson namespace

    In order to avoid symbol clashes and/or "One Definition Rule" errors
    between multiple inclusions of (different versions of) RapidJSON in
    a single binary, users can customize the name of the main RapidJSON
    namespace.

    In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE
    to a custom name (e.g. \c MyRapidJSON) is sufficient. If multiple
    levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref
|
||||
RAPIDJSON_NAMESPACE_END need to be defined as well:
|
||||
|
||||
\code
|
||||
// in some .cpp file
|
||||
#define RAPIDJSON_NAMESPACE my::rapidjson
|
||||
#define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson {
|
||||
#define RAPIDJSON_NAMESPACE_END } }
|
||||
#include "rapidjson/..."
|
||||
\endcode
|
||||
|
||||
\see rapidjson
|
||||
*/
|
||||
/*! \def RAPIDJSON_NAMESPACE_BEGIN
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief provide custom rapidjson namespace (opening expression)
|
||||
\see RAPIDJSON_NAMESPACE
|
||||
*/
|
||||
/*! \def RAPIDJSON_NAMESPACE_END
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief provide custom rapidjson namespace (closing expression)
|
||||
\see RAPIDJSON_NAMESPACE
|
||||
*/
|
||||
#ifndef RAPIDJSON_NAMESPACE
|
||||
#define RAPIDJSON_NAMESPACE rapidjson
|
||||
#endif
|
||||
#ifndef RAPIDJSON_NAMESPACE_BEGIN
|
||||
#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE {
|
||||
#endif
|
||||
#ifndef RAPIDJSON_NAMESPACE_END
|
||||
#define RAPIDJSON_NAMESPACE_END }
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_HAS_STDSTRING
|
||||
|
||||
#ifndef RAPIDJSON_HAS_STDSTRING
|
||||
#ifdef RAPIDJSON_DOXYGEN_RUNNING
|
||||
#define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation
|
||||
#else
|
||||
#define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default
|
||||
#endif
|
||||
/*! \def RAPIDJSON_HAS_STDSTRING
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Enable RapidJSON support for \c std::string
|
||||
|
||||
By defining this preprocessor symbol to \c 1, several convenience functions for using
|
||||
\ref rapidjson::GenericValue with \c std::string are enabled, especially
|
||||
for construction and comparison.
|
||||
|
||||
\hideinitializer
|
||||
*/
|
||||
#endif // !defined(RAPIDJSON_HAS_STDSTRING)
|
||||
|
||||
#if RAPIDJSON_HAS_STDSTRING
|
||||
#include <string>
|
||||
#endif // RAPIDJSON_HAS_STDSTRING
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_NO_INT64DEFINE
|
||||
|
||||
/*! \def RAPIDJSON_NO_INT64DEFINE
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Use external 64-bit integer types.
|
||||
|
||||
RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types
|
||||
to be available at global scope.
|
||||
|
||||
If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to
|
||||
prevent RapidJSON from defining its own types.
|
||||
*/
|
||||
#ifndef RAPIDJSON_NO_INT64DEFINE
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013
|
||||
#include "msinttypes/stdint.h"
|
||||
#include "msinttypes/inttypes.h"
|
||||
#else
|
||||
// Other compilers should have this.
|
||||
#include <stdint.h>
|
||||
#include <inttypes.h>
|
||||
#endif
|
||||
//!@endcond
|
||||
#ifdef RAPIDJSON_DOXYGEN_RUNNING
|
||||
#define RAPIDJSON_NO_INT64DEFINE
|
||||
#endif
|
||||
#endif // RAPIDJSON_NO_INT64TYPEDEF
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_FORCEINLINE
|
||||
|
||||
#ifndef RAPIDJSON_FORCEINLINE
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
#if defined(_MSC_VER) && defined(NDEBUG)
|
||||
#define RAPIDJSON_FORCEINLINE __forceinline
|
||||
#elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG)
|
||||
#define RAPIDJSON_FORCEINLINE __attribute__((always_inline))
|
||||
#else
|
||||
#define RAPIDJSON_FORCEINLINE
|
||||
#endif
|
||||
//!@endcond
|
||||
#endif // RAPIDJSON_FORCEINLINE
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ENDIAN
|
||||
#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine
|
||||
#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine
|
||||
|
||||
//! Endianness of the machine.
|
||||
/*!
|
||||
\def RAPIDJSON_ENDIAN
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
|
||||
GCC 4.6 provided macro for detecting endianness of the target machine. But other
|
||||
compilers may not have this. User can define RAPIDJSON_ENDIAN to either
|
||||
\ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN.
|
||||
|
||||
Default detection implemented with reference to
|
||||
\li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html
|
||||
\li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp
|
||||
*/
|
||||
#ifndef RAPIDJSON_ENDIAN
|
||||
// Detect with GCC 4.6's macro
|
||||
# ifdef __BYTE_ORDER__
|
||||
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
# else
|
||||
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
|
||||
# endif // __BYTE_ORDER__
|
||||
// Detect with GLIBC's endian.h
|
||||
# elif defined(__GLIBC__)
|
||||
# include <endian.h>
|
||||
# if (__BYTE_ORDER == __LITTLE_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif (__BYTE_ORDER == __BIG_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
# else
|
||||
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
|
||||
# endif // __GLIBC__
|
||||
// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro
|
||||
# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
// Detect with architecture macros
|
||||
# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif defined(_MSC_VER) && defined(_M_ARM)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif defined(RAPIDJSON_DOXYGEN_RUNNING)
|
||||
# define RAPIDJSON_ENDIAN
|
||||
# else
|
||||
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
|
||||
# endif
|
||||
#endif // RAPIDJSON_ENDIAN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_64BIT
|
||||
|
||||
//! Whether using 64-bit architecture
|
||||
#ifndef RAPIDJSON_64BIT
|
||||
#if defined(__LP64__) || (defined(__x86_64__) && defined(__ILP32__)) || defined(_WIN64) || defined(__EMSCRIPTEN__)
|
||||
#define RAPIDJSON_64BIT 1
|
||||
#else
|
||||
#define RAPIDJSON_64BIT 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_64BIT
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ALIGN
|
||||
|
||||
//! Data alignment of the machine.
|
||||
/*! \ingroup RAPIDJSON_CONFIG
|
||||
\param x pointer to align
|
||||
|
||||
Some machines require strict data alignment. Currently the default uses 4 bytes
|
||||
alignment on 32-bit platforms and 8 bytes alignment for 64-bit platforms.
|
||||
User can customize by defining the RAPIDJSON_ALIGN function macro.
|
||||
*/
|
||||
#ifndef RAPIDJSON_ALIGN
|
||||
#if RAPIDJSON_64BIT == 1
|
||||
#define RAPIDJSON_ALIGN(x) (((x) + static_cast<uint64_t>(7u)) & ~static_cast<uint64_t>(7u))
|
||||
#else
|
||||
#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_UINT64_C2
|
||||
|
||||
//! Construct a 64-bit literal by a pair of 32-bit integer.
|
||||
/*!
|
||||
64-bit literal with or without ULL suffix is prone to compiler warnings.
|
||||
UINT64_C() is C macro which cause compilation problems.
|
||||
Use this macro to define 64-bit constants by a pair of 32-bit integer.
|
||||
*/
|
||||
#ifndef RAPIDJSON_UINT64_C2
|
||||
#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast<uint64_t>(high32) << 32) | static_cast<uint64_t>(low32))
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_48BITPOINTER_OPTIMIZATION
|
||||
|
||||
//! Use only lower 48-bit address for some pointers.
|
||||
/*!
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
|
||||
This optimization uses the fact that current X86-64 architecture only implement lower 48-bit virtual address.
|
||||
The higher 16-bit can be used for storing other data.
|
||||
\c GenericValue uses this optimization to reduce its size form 24 bytes to 16 bytes in 64-bit architecture.
|
||||
*/
|
||||
#ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION
|
||||
#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
|
||||
#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1
|
||||
#else
|
||||
#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION
|
||||
|
||||
#if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1
|
||||
#if RAPIDJSON_64BIT != 1
|
||||
#error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1
|
||||
#endif
|
||||
#define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast<type *>((reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast<uintptr_t>(reinterpret_cast<const void*>(x))))
|
||||
#define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast<type *>(reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF))))
|
||||
#else
|
||||
#define RAPIDJSON_SETPOINTER(type, p, x) (p = (x))
|
||||
#define RAPIDJSON_GETPOINTER(type, p) (p)
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_SIMD
|
||||
|
||||
/*! \def RAPIDJSON_SIMD
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Enable SSE2/SSE4.2 optimization.
|
||||
|
||||
RapidJSON supports optimized implementations for some parsing operations
|
||||
based on the SSE2 or SSE4.2 SIMD extensions on modern Intel-compatible
|
||||
processors.
|
||||
|
||||
To enable these optimizations, two different symbols can be defined;
|
||||
\code
|
||||
// Enable SSE2 optimization.
|
||||
#define RAPIDJSON_SSE2
|
||||
|
||||
// Enable SSE4.2 optimization.
|
||||
#define RAPIDJSON_SSE42
|
||||
\endcode
|
||||
|
||||
\c RAPIDJSON_SSE42 takes precedence, if both are defined.
|
||||
|
||||
If any of these symbols is defined, RapidJSON defines the macro
|
||||
\c RAPIDJSON_SIMD to indicate the availability of the optimized code.
|
||||
*/
|
||||
#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \
|
||||
|| defined(RAPIDJSON_DOXYGEN_RUNNING)
|
||||
#define RAPIDJSON_SIMD
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
|
||||
#ifndef RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
/*! \def RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief User-provided \c SizeType definition.
|
||||
|
||||
In order to avoid using 32-bit size types for indexing strings and arrays,
|
||||
define this preprocessor symbol and provide the type rapidjson::SizeType
|
||||
before including RapidJSON:
|
||||
\code
|
||||
#define RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
namespace rapidjson { typedef ::std::size_t SizeType; }
|
||||
#include "rapidjson/..."
|
||||
\endcode
|
||||
|
||||
\see rapidjson::SizeType
|
||||
*/
|
||||
#ifdef RAPIDJSON_DOXYGEN_RUNNING
|
||||
#define RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
#endif
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
//! Size type (for string lengths, array sizes, etc.)
|
||||
/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms,
|
||||
instead of using \c size_t. Users may override the SizeType by defining
|
||||
\ref RAPIDJSON_NO_SIZETYPEDEFINE.
|
||||
*/
|
||||
typedef unsigned SizeType;
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
#endif
|
||||
|
||||
// always import std::size_t to rapidjson namespace
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
using std::size_t;
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ASSERT
|
||||
|
||||
//! Assertion.
|
||||
/*! \ingroup RAPIDJSON_CONFIG
|
||||
By default, rapidjson uses C \c assert() for internal assertions.
|
||||
User can override it by defining RAPIDJSON_ASSERT(x) macro.
|
||||
|
||||
\note Parsing errors are handled and can be customized by the
|
||||
\ref RAPIDJSON_ERRORS APIs.
|
||||
*/
|
||||
#ifndef RAPIDJSON_ASSERT
|
||||
#include <cassert>
|
||||
#define RAPIDJSON_ASSERT(x) assert(x)
|
||||
#endif // RAPIDJSON_ASSERT
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_STATIC_ASSERT
|
||||
|
||||
// Adopt from boost
|
||||
#ifndef RAPIDJSON_STATIC_ASSERT
|
||||
#ifndef __clang__
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
#endif
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
template <bool x> struct STATIC_ASSERTION_FAILURE;
|
||||
template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; };
|
||||
template<int x> struct StaticAssertTest {};
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y)
|
||||
#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y)
|
||||
#define RAPIDJSON_DO_JOIN2(X, Y) X##Y
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
|
||||
#else
|
||||
#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
|
||||
#endif
|
||||
#ifndef __clang__
|
||||
//!@endcond
|
||||
#endif
|
||||
|
||||
/*! \def RAPIDJSON_STATIC_ASSERT
|
||||
\brief (Internal) macro to check for conditions at compile-time
|
||||
\param x compile-time condition
|
||||
\hideinitializer
|
||||
*/
|
||||
#define RAPIDJSON_STATIC_ASSERT(x) \
|
||||
typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \
|
||||
sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE<bool(x) >)> \
|
||||
RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY
|
||||
|
||||
//! Compiler branching hint for expression with high probability to be true.
|
||||
/*!
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\param x Boolean expression likely to be true.
|
||||
*/
|
||||
#ifndef RAPIDJSON_LIKELY
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1)
|
||||
#else
|
||||
#define RAPIDJSON_LIKELY(x) (x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
//! Compiler branching hint for expression with low probability to be true.
|
||||
/*!
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\param x Boolean expression unlikely to be true.
|
||||
*/
|
||||
#ifndef RAPIDJSON_UNLIKELY
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0)
|
||||
#else
|
||||
#define RAPIDJSON_UNLIKELY(x) (x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Helpers
|
||||
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
|
||||
#define RAPIDJSON_MULTILINEMACRO_BEGIN do {
|
||||
#define RAPIDJSON_MULTILINEMACRO_END \
|
||||
} while((void)0, 0)
|
||||
|
||||
// adopted from Boost
|
||||
#define RAPIDJSON_VERSION_CODE(x,y,z) \
|
||||
(((x)*100000) + ((y)*100) + (z))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#define RAPIDJSON_GNUC \
|
||||
RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__)
|
||||
#endif
|
||||
|
||||
#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0))
|
||||
|
||||
#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x))
|
||||
#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x)
|
||||
#define RAPIDJSON_DIAG_OFF(x) \
|
||||
RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x)))
|
||||
|
||||
// push/pop support in Clang and GCC>=4.6
|
||||
#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0))
|
||||
#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
|
||||
#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
|
||||
#else // GCC >= 4.2, < 4.6
|
||||
#define RAPIDJSON_DIAG_PUSH /* ignored */
|
||||
#define RAPIDJSON_DIAG_POP /* ignored */
|
||||
#endif
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
// pragma (MSVC specific)
|
||||
#define RAPIDJSON_PRAGMA(x) __pragma(x)
|
||||
#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x))
|
||||
|
||||
#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x)
|
||||
#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
|
||||
#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
|
||||
|
||||
#else
|
||||
|
||||
#define RAPIDJSON_DIAG_OFF(x) /* ignored */
|
||||
#define RAPIDJSON_DIAG_PUSH /* ignored */
|
||||
#define RAPIDJSON_DIAG_POP /* ignored */
|
||||
|
||||
#endif // RAPIDJSON_DIAG_*
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// C++11 features
|
||||
|
||||
#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
#if defined(__clang__)
|
||||
#if __has_feature(cxx_rvalue_references) && \
|
||||
(defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306)
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
|
||||
#endif
|
||||
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
|
||||
(defined(_MSC_VER) && _MSC_VER >= 1600)
|
||||
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
|
||||
#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT
|
||||
#if defined(__clang__)
|
||||
#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept)
|
||||
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__))
|
||||
// (defined(_MSC_VER) && _MSC_VER >= ????) // not yet supported
|
||||
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0
|
||||
#endif
|
||||
#endif
|
||||
#if RAPIDJSON_HAS_CXX11_NOEXCEPT
|
||||
#define RAPIDJSON_NOEXCEPT noexcept
|
||||
#else
|
||||
#define RAPIDJSON_NOEXCEPT /* noexcept */
|
||||
#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
|
||||
|
||||
// no automatic detection, yet
|
||||
#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS
|
||||
#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0
|
||||
#endif
|
||||
|
||||
#ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR
|
||||
#if defined(__clang__)
|
||||
#define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for)
|
||||
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
|
||||
(defined(_MSC_VER) && _MSC_VER >= 1700)
|
||||
#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_RANGE_FOR 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR
|
||||
|
||||
//!@endcond
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// new/delete
|
||||
|
||||
#ifndef RAPIDJSON_NEW
|
||||
///! customization point for global \c new
|
||||
#define RAPIDJSON_NEW(x) new x
|
||||
#endif
|
||||
#ifndef RAPIDJSON_DELETE
|
||||
///! customization point for global \c delete
|
||||
#define RAPIDJSON_DELETE(x) delete x
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Type
|
||||
|
||||
/*! \namespace rapidjson
|
||||
\brief main RapidJSON namespace
|
||||
\see RAPIDJSON_NAMESPACE
|
||||
*/
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Type of JSON value
|
||||
enum Type {
|
||||
kNullType = 0, //!< null
|
||||
kFalseType = 1, //!< false
|
||||
kTrueType = 2, //!< true
|
||||
kObjectType = 3, //!< object
|
||||
kArrayType = 4, //!< array
|
||||
kStringType = 5, //!< string
|
||||
kNumberType = 6 //!< number
|
||||
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_RAPIDJSON_H_
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,179 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "rapidjson.h"

#ifndef RAPIDJSON_STREAM_H_
#define RAPIDJSON_STREAM_H_

#include "encodings.h"

RAPIDJSON_NAMESPACE_BEGIN

///////////////////////////////////////////////////////////////////////////////
// Stream

/*! \class rapidjson::Stream
    \brief Concept for reading and writing characters.

    For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd().

    For write-only stream, only need to implement Put() and Flush().

\code
concept Stream {
    typename Ch;    //!< Character type of the stream.

    //! Read the current character from stream without moving the read cursor.
    Ch Peek() const;

    //! Read the current character from stream and moving the read cursor to next character.
    Ch Take();

    //! Get the current read cursor.
    //! \return Number of characters read from start.
    size_t Tell();

    //! Begin writing operation at the current read pointer.
    //! \return The begin writer pointer.
    Ch* PutBegin();

    //! Write a character.
    void Put(Ch c);

    //! Flush the buffer.
    void Flush();

    //! End the writing operation.
    //! \param begin The begin write pointer returned by PutBegin().
    //! \return Number of characters written.
    size_t PutEnd(Ch* begin);
}
\endcode
*/

//! Provides additional information for stream.
/*!
    By using traits pattern, this type provides a default configuration for stream.
    For custom stream, this type can be specialized for other configuration.
    See TEST(Reader, CustomStringStream) in readertest.cpp for example.
*/
template<typename Stream>
struct StreamTraits {
    //! Whether to make local copy of stream for optimization during parsing.
    /*!
        By default, for safety, streams do not use local copy optimization.
        Stream that can be copied fast should specialize this, like StreamTraits<StringStream>.
    */
    enum { copyOptimization = 0 };
};

//! Reserve n characters for writing to a stream.
template<typename Stream>
inline void PutReserve(Stream& stream, size_t count) {
    (void)stream;
    (void)count;
}

//! Write character to a stream, presuming buffer is reserved.
template<typename Stream>
inline void PutUnsafe(Stream& stream, typename Stream::Ch c) {
    stream.Put(c);
}

//! Put N copies of a character to a stream.
template<typename Stream, typename Ch>
inline void PutN(Stream& stream, Ch c, size_t n) {
    PutReserve(stream, n);
    for (size_t i = 0; i < n; i++)
        PutUnsafe(stream, c);
}

///////////////////////////////////////////////////////////////////////////////
// StringStream

//! Read-only string stream.
/*! \note implements Stream concept
*/
template <typename Encoding>
struct GenericStringStream {
    typedef typename Encoding::Ch Ch;

    GenericStringStream(const Ch *src) : src_(src), head_(src) {}

    Ch Peek() const { return *src_; }
    Ch Take() { return *src_++; }
    size_t Tell() const { return static_cast<size_t>(src_ - head_); }

    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    const Ch* src_;     //!< Current read position.
    const Ch* head_;    //!< Original head of the string.
};

template <typename Encoding>
struct StreamTraits<GenericStringStream<Encoding> > {
    enum { copyOptimization = 1 };
};

//! String stream with UTF8 encoding.
typedef GenericStringStream<UTF8<> > StringStream;

///////////////////////////////////////////////////////////////////////////////
// InsituStringStream

//! A read-write string stream.
/*! This string stream is particularly designed for in-situ parsing.
    \note implements Stream concept
*/
template <typename Encoding>
struct GenericInsituStringStream {
    typedef typename Encoding::Ch Ch;

    GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {}

    // Read
    Ch Peek() { return *src_; }
    Ch Take() { return *src_++; }
    size_t Tell() { return static_cast<size_t>(src_ - head_); }

    // Write
    void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; }

    Ch* PutBegin() { return dst_ = src_; }
    size_t PutEnd(Ch* begin) { return static_cast<size_t>(dst_ - begin); }
    void Flush() {}

    Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; }
    void Pop(size_t count) { dst_ -= count; }

    Ch* src_;
    Ch* dst_;
    Ch* head_;
};

template <typename Encoding>
struct StreamTraits<GenericInsituStringStream<Encoding> > {
    enum { copyOptimization = 1 };
};

//! Insitu string stream with UTF8 encoding.
typedef GenericInsituStringStream<UTF8<> > InsituStringStream;

RAPIDJSON_NAMESPACE_END

#endif // RAPIDJSON_STREAM_H_
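A short sketch of the Stream concept in use, driving the read-only StringStream defined above by hand; the helper function name is illustrative, not part of the diff:

#include "rapidjson/stream.h"

// Count how many leading digits a string has by stepping the read cursor manually.
inline size_t CountLeadingDigits(const char* text) {
    rapidjson::StringStream ss(text);
    while (ss.Peek() >= '0' && ss.Peek() <= '9')
        ss.Take();      // advance the read cursor
    return ss.Tell();   // characters consumed so far
}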
@ -0,0 +1,117 @@
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#ifndef RAPIDJSON_STRINGBUFFER_H_
#define RAPIDJSON_STRINGBUFFER_H_

#include "stream.h"
#include "internal/stack.h"

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
#include <utility> // std::move
#endif

#include "internal/stack.h"

#if defined(__clang__)
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(c++98-compat)
#endif

RAPIDJSON_NAMESPACE_BEGIN

//! Represents an in-memory output stream.
/*!
    \tparam Encoding Encoding of the stream.
    \tparam Allocator type for allocating memory buffer.
    \note implements Stream concept
*/
template <typename Encoding, typename Allocator = CrtAllocator>
class GenericStringBuffer {
public:
    typedef typename Encoding::Ch Ch;

    GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {}
    GenericStringBuffer& operator=(GenericStringBuffer&& rhs) {
        if (&rhs != this)
            stack_ = std::move(rhs.stack_);
        return *this;
    }
#endif

    void Put(Ch c) { *stack_.template Push<Ch>() = c; }
    void PutUnsafe(Ch c) { *stack_.template PushUnsafe<Ch>() = c; }
    void Flush() {}

    void Clear() { stack_.Clear(); }
    void ShrinkToFit() {
        // Push and pop a null terminator. This is safe.
        *stack_.template Push<Ch>() = '\0';
        stack_.ShrinkToFit();
        stack_.template Pop<Ch>(1);
    }

    void Reserve(size_t count) { stack_.template Reserve<Ch>(count); }
    Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
    Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe<Ch>(count); }
    void Pop(size_t count) { stack_.template Pop<Ch>(count); }

    const Ch* GetString() const {
        // Push and pop a null terminator. This is safe.
        *stack_.template Push<Ch>() = '\0';
        stack_.template Pop<Ch>(1);

        return stack_.template Bottom<Ch>();
    }

    size_t GetSize() const { return stack_.GetSize(); }

    static const size_t kDefaultCapacity = 256;
    mutable internal::Stack<Allocator> stack_;

private:
    // Prohibit copy constructor & assignment operator.
    GenericStringBuffer(const GenericStringBuffer&);
    GenericStringBuffer& operator=(const GenericStringBuffer&);
};

//! String buffer with UTF8 encoding
typedef GenericStringBuffer<UTF8<> > StringBuffer;

template<typename Encoding, typename Allocator>
inline void PutReserve(GenericStringBuffer<Encoding, Allocator>& stream, size_t count) {
    stream.Reserve(count);
}

template<typename Encoding, typename Allocator>
inline void PutUnsafe(GenericStringBuffer<Encoding, Allocator>& stream, typename Encoding::Ch c) {
    stream.PutUnsafe(c);
}

//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(GenericStringBuffer<UTF8<> >& stream, char c, size_t n) {
    std::memset(stream.stack_.Push<char>(n), c, n * sizeof(c));
}

RAPIDJSON_NAMESPACE_END

#if defined(__clang__)
RAPIDJSON_DIAG_POP
#endif

#endif // RAPIDJSON_STRINGBUFFER_H_
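A minimal sketch of GenericStringBuffer as an in-memory output stream, using only the members shown above; the function name and include path are illustrative, not part of the diff:

#include "rapidjson/stringbuffer.h"

inline void StringBufferExample() {
    rapidjson::StringBuffer sb;
    sb.Put('h');
    sb.Put('i');
    const char* s = sb.GetString();   // "hi"; GetString() null-terminates on demand
    size_t n = sb.GetSize();          // 2; size excludes the terminator
    (void)s; (void)n;
    sb.Clear();                       // reuse the same buffer for the next write
}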
@ -0,0 +1,616 @@
|
|||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_WRITER_H_
|
||||
#define RAPIDJSON_WRITER_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include "internal/stack.h"
|
||||
#include "internal/strfunc.h"
|
||||
#include "internal/dtoa.h"
|
||||
#include "internal/itoa.h"
|
||||
#include "stringbuffer.h"
|
||||
#include <new> // placement new
|
||||
|
||||
#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER)
|
||||
#include <intrin.h>
|
||||
#pragma intrinsic(_BitScanForward)
|
||||
#endif
|
||||
#ifdef RAPIDJSON_SSE42
|
||||
#include <nmmintrin.h>
|
||||
#elif defined(RAPIDJSON_SSE2)
|
||||
#include <emmintrin.h>
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
RAPIDJSON_DIAG_OFF(unreachable-code)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// WriteFlag
|
||||
|
||||
/*! \def RAPIDJSON_WRITE_DEFAULT_FLAGS
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief User-defined kWriteDefaultFlags definition.
|
||||
|
||||
User can define this as any \c WriteFlag combinations.
|
||||
*/
|
||||
#ifndef RAPIDJSON_WRITE_DEFAULT_FLAGS
|
||||
#define RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNoFlags
|
||||
#endif
|
||||
|
||||
//! Combination of writeFlags
|
||||
enum WriteFlag {
|
||||
kWriteNoFlags = 0, //!< No flags are set.
|
||||
kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings.
|
||||
kWriteNanAndInfFlag = 2, //!< Allow writing of Infinity, -Infinity and NaN.
|
||||
kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS
|
||||
};
|
||||
|
||||
//! JSON writer
|
||||
/*! Writer implements the concept Handler.
|
||||
It generates JSON text by events to an output os.
|
||||
|
||||
User may programmatically calls the functions of a writer to generate JSON text.
|
||||
|
||||
On the other side, a writer can also be passed to objects that generates events,
|
||||
|
||||
for example Reader::Parse() and Document::Accept().
|
||||
|
||||
\tparam OutputStream Type of output stream.
|
||||
\tparam SourceEncoding Encoding of source string.
|
||||
\tparam TargetEncoding Encoding of output stream.
|
||||
\tparam StackAllocator Type of allocator for allocating memory of stack.
|
||||
\note implements Handler concept
|
||||
*/
|
||||
template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
|
||||
class Writer {
|
||||
public:
|
||||
typedef typename SourceEncoding::Ch Ch;
|
||||
|
||||
static const int kDefaultMaxDecimalPlaces = 324;
|
||||
|
||||
//! Constructor
|
||||
/*! \param os Output stream.
|
||||
\param stackAllocator User supplied allocator. If it is null, it will create a private one.
|
||||
\param levelDepth Initial capacity of stack.
|
||||
*/
|
||||
explicit
|
||||
Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) :
|
||||
os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}
|
||||
|
||||
explicit
|
||||
Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) :
|
||||
os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}
|
||||
|
||||
//! Reset the writer with a new stream.
|
||||
/*!
|
||||
This function reset the writer with a new stream and default settings,
|
||||
in order to make a Writer object reusable for output multiple JSONs.
|
||||
|
||||
\param os New output stream.
|
||||
\code
|
||||
Writer<OutputStream> writer(os1);
|
||||
writer.StartObject();
|
||||
// ...
|
||||
writer.EndObject();
|
||||
|
||||
writer.Reset(os2);
|
||||
writer.StartObject();
|
||||
// ...
|
||||
writer.EndObject();
|
||||
\endcode
|
||||
*/
|
||||
void Reset(OutputStream& os) {
|
||||
os_ = &os;
|
||||
hasRoot_ = false;
|
||||
level_stack_.Clear();
|
||||
}
|
||||
|
||||
//! Checks whether the output is a complete JSON.
|
||||
/*!
|
||||
A complete JSON has a complete root object or array.
|
||||
*/
|
||||
bool IsComplete() const {
|
||||
return hasRoot_ && level_stack_.Empty();
|
||||
}
|
||||
|
||||
int GetMaxDecimalPlaces() const {
|
||||
return maxDecimalPlaces_;
|
||||
}
|
||||
|
||||
//! Sets the maximum number of decimal places for double output.
|
||||
/*!
|
||||
This setting truncates the output with specified number of decimal places.
|
||||
|
||||
For example,
|
||||
|
||||
\code
|
||||
writer.SetMaxDecimalPlaces(3);
|
||||
writer.StartArray();
|
||||
writer.Double(0.12345); // "0.123"
|
||||
writer.Double(0.0001); // "0.0"
|
||||
writer.Double(1.234567890123456e30); // "1.234567890123456e30" (do not truncate significand for positive exponent)
|
||||
writer.Double(1.23e-4); // "0.0" (do truncate significand for negative exponent)
|
||||
writer.EndArray();
|
||||
\endcode
|
||||
|
||||
The default setting does not truncate any decimal places. You can restore to this setting by calling
|
||||
\code
|
||||
writer.SetMaxDecimalPlaces(Writer::kDefaultMaxDecimalPlaces);
|
||||
\endcode
|
||||
*/
|
||||
void SetMaxDecimalPlaces(int maxDecimalPlaces) {
|
||||
maxDecimalPlaces_ = maxDecimalPlaces;
|
||||
}
|
||||
|
||||
/*!@name Implementation of Handler
|
||||
\see Handler
|
||||
*/
|
||||
//@{
|
||||
|
||||
bool Null() { Prefix(kNullType); return EndValue(WriteNull()); }
|
||||
bool Bool(bool b) { Prefix(b ? kTrueType : kFalseType); return EndValue(WriteBool(b)); }
|
||||
bool Int(int i) { Prefix(kNumberType); return EndValue(WriteInt(i)); }
|
||||
bool Uint(unsigned u) { Prefix(kNumberType); return EndValue(WriteUint(u)); }
|
||||
bool Int64(int64_t i64) { Prefix(kNumberType); return EndValue(WriteInt64(i64)); }
|
||||
bool Uint64(uint64_t u64) { Prefix(kNumberType); return EndValue(WriteUint64(u64)); }
|
||||
|
||||
//! Writes the given \c double value to the stream
|
||||
/*!
|
||||
\param d The value to be written.
|
||||
\return Whether it is succeed.
|
||||
*/
|
||||
bool Double(double d) { Prefix(kNumberType); return EndValue(WriteDouble(d)); }
|
||||
|
||||
bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
|
||||
RAPIDJSON_ASSERT(str != 0);
|
||||
(void)copy;
|
||||
Prefix(kNumberType);
|
||||
return EndValue(WriteString(str, length));
|
||||
}
|
||||
|
||||
bool String(const Ch* str, SizeType length, bool copy = false) {
|
||||
RAPIDJSON_ASSERT(str != 0);
|
||||
(void)copy;
|
||||
Prefix(kStringType);
|
||||
return EndValue(WriteString(str, length));
|
||||
}
|
||||
|
||||
#if RAPIDJSON_HAS_STDSTRING
|
||||
bool String(const std::basic_string<Ch>& str) {
|
||||
return String(str.data(), SizeType(str.size()));
|
||||
}
|
||||
#endif
|
||||
|
||||
bool StartObject() {
|
||||
Prefix(kObjectType);
|
||||
new (level_stack_.template Push<Level>()) Level(false);
|
||||
return WriteStartObject();
|
||||
}
|
||||
|
||||
bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }
|
||||
|
||||
bool EndObject(SizeType memberCount = 0) {
|
||||
(void)memberCount;
|
||||
RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level));
|
||||
RAPIDJSON_ASSERT(!level_stack_.template Top<Level>()->inArray);
|
||||
level_stack_.template Pop<Level>(1);
|
||||
return EndValue(WriteEndObject());
|
||||
}
|
||||
|
||||
bool StartArray() {
|
||||
Prefix(kArrayType);
|
||||
new (level_stack_.template Push<Level>()) Level(true);
|
||||
return WriteStartArray();
|
||||
}
|
||||
|
||||
bool EndArray(SizeType elementCount = 0) {
|
||||
(void)elementCount;
|
||||
RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level));
|
||||
RAPIDJSON_ASSERT(level_stack_.template Top<Level>()->inArray);
|
||||
level_stack_.template Pop<Level>(1);
|
||||
return EndValue(WriteEndArray());
|
||||
}
|
||||
//@}
|
||||
|
||||
/*! @name Convenience extensions */
|
||||
//@{
|
||||
|
||||
//! Simpler but slower overload.
|
||||
bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
|
||||
bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }
|
||||
|
||||
//@}
|
||||
|
||||
//! Write a raw JSON value.
|
||||
/*!
|
||||
For user to write a stringified JSON as a value.
|
||||
|
||||
\param json A well-formed JSON value. It should not contain null character within [0, length - 1] range.
|
||||
\param length Length of the json.
|
||||
\param type Type of the root of json.
|
||||
*/
|
||||
bool RawValue(const Ch* json, size_t length, Type type) {
|
||||
RAPIDJSON_ASSERT(json != 0);
|
||||
Prefix(type);
|
||||
return EndValue(WriteRawValue(json, length));
|
||||
}
|
||||
|
||||
protected:
|
||||
//! Information for each nested level
|
||||
struct Level {
|
||||
Level(bool inArray_) : valueCount(0), inArray(inArray_) {}
|
||||
size_t valueCount; //!< number of values in this level
|
||||
bool inArray; //!< true if in array, otherwise in object
|
||||
};
|
||||
|
||||
static const size_t kDefaultLevelDepth = 32;
|
||||
|
||||
bool WriteNull() {
|
||||
PutReserve(*os_, 4);
|
||||
PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true;
|
||||
}
|
||||
|
||||
bool WriteBool(bool b) {
|
||||
if (b) {
|
||||
PutReserve(*os_, 4);
|
||||
PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'r'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'e');
|
||||
}
|
||||
else {
|
||||
PutReserve(*os_, 5);
|
||||
PutUnsafe(*os_, 'f'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 's'); PutUnsafe(*os_, 'e');
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WriteInt(int i) {
|
||||
char buffer[11];
|
||||
const char* end = internal::i32toa(i, buffer);
|
||||
PutReserve(*os_, static_cast<size_t>(end - buffer));
|
||||
for (const char* p = buffer; p != end; ++p)
|
||||
PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WriteUint(unsigned u) {
|
||||
char buffer[10];
|
||||
const char* end = internal::u32toa(u, buffer);
|
||||
PutReserve(*os_, static_cast<size_t>(end - buffer));
|
||||
for (const char* p = buffer; p != end; ++p)
|
||||
PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WriteInt64(int64_t i64) {
|
||||
char buffer[21];
|
||||
const char* end = internal::i64toa(i64, buffer);
|
||||
PutReserve(*os_, static_cast<size_t>(end - buffer));
|
||||
for (const char* p = buffer; p != end; ++p)
|
||||
PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WriteUint64(uint64_t u64) {
|
||||
char buffer[20];
|
||||
char* end = internal::u64toa(u64, buffer);
|
||||
PutReserve(*os_, static_cast<size_t>(end - buffer));
|
||||
for (char* p = buffer; p != end; ++p)
|
||||
PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WriteDouble(double d) {
|
||||
if (internal::Double(d).IsNanOrInf()) {
|
||||
if (!(writeFlags & kWriteNanAndInfFlag))
|
||||
return false;
|
||||
if (internal::Double(d).IsNan()) {
|
||||
PutReserve(*os_, 3);
|
||||
PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
|
||||
return true;
|
||||
}
|
||||
if (internal::Double(d).Sign()) {
|
||||
PutReserve(*os_, 9);
|
||||
PutUnsafe(*os_, '-');
|
||||
}
|
||||
else
|
||||
PutReserve(*os_, 8);
|
||||
PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f');
|
||||
PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y');
|
||||
return true;
|
||||
}
|
||||
|
||||
char buffer[25];
|
||||
char* end = internal::dtoa(d, buffer, maxDecimalPlaces_);
|
||||
PutReserve(*os_, static_cast<size_t>(end - buffer));
|
||||
for (char* p = buffer; p != end; ++p)
|
||||
PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WriteString(const Ch* str, SizeType length) {
|
||||
static const typename TargetEncoding::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
|
||||
static const char escape[256] = {
|
||||
#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
//0 1 2 3 4 5 6 7 8 9 A B C D E F
|
||||
'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00
|
||||
'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10
|
||||
0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20
|
||||
Z16, Z16, // 30~4F
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, // 50
|
||||
Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 // 60~FF
|
||||
#undef Z16
|
||||
};
|
||||
|
||||
if (TargetEncoding::supportUnicode)
|
||||
PutReserve(*os_, 2 + length * 6); // "\uxxxx..."
|
||||
else
|
||||
PutReserve(*os_, 2 + length * 12); // "\uxxxx\uyyyy..."
|
||||
|
||||
PutUnsafe(*os_, '\"');
|
||||
GenericStringStream<SourceEncoding> is(str);
|
||||
while (ScanWriteUnescapedString(is, length)) {
|
||||
const Ch c = is.Peek();
|
||||
if (!TargetEncoding::supportUnicode && static_cast<unsigned>(c) >= 0x80) {
|
||||
// Unicode escaping
|
||||
unsigned codepoint;
|
||||
if (RAPIDJSON_UNLIKELY(!SourceEncoding::Decode(is, &codepoint)))
|
||||
return false;
|
||||
PutUnsafe(*os_, '\\');
|
||||
PutUnsafe(*os_, 'u');
|
||||
if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) {
|
||||
PutUnsafe(*os_, hexDigits[(codepoint >> 12) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(codepoint >> 8) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(codepoint >> 4) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(codepoint ) & 15]);
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF);
|
||||
// Surrogate pair
|
||||
unsigned s = codepoint - 0x010000;
|
||||
unsigned lead = (s >> 10) + 0xD800;
|
||||
unsigned trail = (s & 0x3FF) + 0xDC00;
|
||||
PutUnsafe(*os_, hexDigits[(lead >> 12) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(lead >> 8) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(lead >> 4) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(lead ) & 15]);
|
||||
PutUnsafe(*os_, '\\');
|
||||
PutUnsafe(*os_, 'u');
|
||||
PutUnsafe(*os_, hexDigits[(trail >> 12) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(trail >> 8) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(trail >> 4) & 15]);
|
||||
PutUnsafe(*os_, hexDigits[(trail ) & 15]);
|
||||
}
|
||||
}
|
||||
else if ((sizeof(Ch) == 1 || static_cast<unsigned>(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast<unsigned char>(c)])) {
|
||||
is.Take();
|
||||
PutUnsafe(*os_, '\\');
|
||||
PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(escape[static_cast<unsigned char>(c)]));
|
||||
if (escape[static_cast<unsigned char>(c)] == 'u') {
|
||||
PutUnsafe(*os_, '0');
|
||||
PutUnsafe(*os_, '0');
|
||||
PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) >> 4]);
|
||||
PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) & 0xF]);
|
||||
}
|
||||
}
|
||||
else if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ?
|
||||
Transcoder<SourceEncoding, TargetEncoding>::Validate(is, *os_) :
|
||||
Transcoder<SourceEncoding, TargetEncoding>::TranscodeUnsafe(is, *os_))))
|
||||
return false;
|
||||
}
|
||||
PutUnsafe(*os_, '\"');
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ScanWriteUnescapedString(GenericStringStream<SourceEncoding>& is, size_t length) {
|
||||
return RAPIDJSON_LIKELY(is.Tell() < length);
|
||||
}
|
||||
|
||||
bool WriteStartObject() { os_->Put('{'); return true; }
|
||||
bool WriteEndObject() { os_->Put('}'); return true; }
|
||||
bool WriteStartArray() { os_->Put('['); return true; }
|
||||
bool WriteEndArray() { os_->Put(']'); return true; }
|
||||
|
||||
bool WriteRawValue(const Ch* json, size_t length) {
|
||||
PutReserve(*os_, length);
|
||||
for (size_t i = 0; i < length; i++) {
|
||||
RAPIDJSON_ASSERT(json[i] != '\0');
|
||||
PutUnsafe(*os_, json[i]);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void Prefix(Type type) {
|
||||
(void)type;
|
||||
if (RAPIDJSON_LIKELY(level_stack_.GetSize() != 0)) { // this value is not at root
|
||||
Level* level = level_stack_.template Top<Level>();
|
||||
if (level->valueCount > 0) {
|
||||
if (level->inArray)
|
||||
os_->Put(','); // add comma if it is not the first element in array
|
||||
else // in object
|
||||
os_->Put((level->valueCount % 2 == 0) ? ',' : ':');
|
||||
}
|
||||
if (!level->inArray && level->valueCount % 2 == 0)
|
||||
RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name
|
||||
level->valueCount++;
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(!hasRoot_); // Should only has one and only one root.
|
||||
hasRoot_ = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Flush the value if it is the top level one.
|
||||
bool EndValue(bool ret) {
|
||||
if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text
|
||||
os_->Flush();
|
||||
return ret;
|
||||
}
|
||||
|
||||
OutputStream* os_;
|
||||
internal::Stack<StackAllocator> level_stack_;
|
||||
int maxDecimalPlaces_;
|
||||
bool hasRoot_;
|
||||
|
||||
private:
|
||||
// Prohibit copy constructor & assignment operator.
|
||||
Writer(const Writer&);
|
||||
Writer& operator=(const Writer&);
|
||||
};
|
||||
|
||||
// Full specialization for StringStream to prevent memory copying
|
||||
|
||||
template<>
|
||||
inline bool Writer<StringBuffer>::WriteInt(int i) {
|
||||
char *buffer = os_->Push(11);
|
||||
const char* end = internal::i32toa(i, buffer);
|
||||
os_->Pop(static_cast<size_t>(11 - (end - buffer)));
|
||||
return true;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool Writer<StringBuffer>::WriteUint(unsigned u) {
|
||||
char *buffer = os_->Push(10);
|
||||
const char* end = internal::u32toa(u, buffer);
|
||||
os_->Pop(static_cast<size_t>(10 - (end - buffer)));
|
||||
return true;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool Writer<StringBuffer>::WriteInt64(int64_t i64) {
|
||||
char *buffer = os_->Push(21);
|
||||
const char* end = internal::i64toa(i64, buffer);
|
||||
os_->Pop(static_cast<size_t>(21 - (end - buffer)));
|
||||
return true;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool Writer<StringBuffer>::WriteUint64(uint64_t u) {
|
||||
char *buffer = os_->Push(20);
|
||||
const char* end = internal::u64toa(u, buffer);
|
||||
os_->Pop(static_cast<size_t>(20 - (end - buffer)));
|
||||
return true;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool Writer<StringBuffer>::WriteDouble(double d) {
|
||||
if (internal::Double(d).IsNanOrInf()) {
|
||||
// Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag).
|
||||
if (!(kWriteDefaultFlags & kWriteNanAndInfFlag))
|
||||
return false;
|
||||
if (internal::Double(d).IsNan()) {
|
||||
PutReserve(*os_, 3);
|
||||
PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
|
||||
return true;
|
||||
}
|
||||
if (internal::Double(d).Sign()) {
|
||||
PutReserve(*os_, 9);
|
||||
PutUnsafe(*os_, '-');
|
||||
}
|
||||
else
|
||||
PutReserve(*os_, 8);
|
||||
PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f');
|
||||
PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y');
|
||||
return true;
|
||||
}
|
||||
|
||||
char *buffer = os_->Push(25);
|
||||
char* end = internal::dtoa(d, buffer, maxDecimalPlaces_);
|
||||
os_->Pop(static_cast<size_t>(25 - (end - buffer)));
|
||||
return true;
|
||||
}
|
||||
|
||||
#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42)
|
||||
template<>
|
||||
inline bool Writer<StringBuffer>::ScanWriteUnescapedString(StringStream& is, size_t length) {
|
||||
if (length < 16)
|
||||
return RAPIDJSON_LIKELY(is.Tell() < length);
|
||||
|
||||
if (!RAPIDJSON_LIKELY(is.Tell() < length))
|
||||
return false;
|
||||
|
||||
const char* p = is.src_;
|
||||
const char* end = is.head_ + length;
|
||||
const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
|
||||
const char* endAligned = reinterpret_cast<const char*>(reinterpret_cast<size_t>(end) & static_cast<size_t>(~15));
|
||||
if (nextAligned > end)
|
||||
return true;
|
||||
|
||||
while (p != nextAligned)
|
||||
if (*p < 0x20 || *p == '\"' || *p == '\\') {
|
||||
is.src_ = p;
|
||||
return RAPIDJSON_LIKELY(is.Tell() < length);
|
||||
}
|
||||
else
|
||||
os_->PutUnsafe(*p++);
|
||||
|
||||
// The rest of string using SIMD
|
||||
static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' };
|
||||
static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' };
|
||||
static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 };
|
||||
const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0]));
|
||||
const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0]));
|
||||
const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0]));
|
||||
|
||||
for (; p != endAligned; p += 16) {
|
||||
const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
|
||||
const __m128i t1 = _mm_cmpeq_epi8(s, dq);
|
||||
const __m128i t2 = _mm_cmpeq_epi8(s, bs);
|
||||
const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19
|
||||
const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3);
|
||||
unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x));
|
||||
        if (RAPIDJSON_UNLIKELY(r != 0)) { // some of the characters are escaped
|
||||
SizeType len;
|
||||
#ifdef _MSC_VER // Find the index of first escaped
|
||||
unsigned long offset;
|
||||
_BitScanForward(&offset, r);
|
||||
len = offset;
|
||||
#else
|
||||
len = static_cast<SizeType>(__builtin_ffs(r) - 1);
|
||||
#endif
|
||||
char* q = reinterpret_cast<char*>(os_->PushUnsafe(len));
|
||||
for (size_t i = 0; i < len; i++)
|
||||
q[i] = p[i];
|
||||
|
||||
p += len;
|
||||
break;
|
||||
}
|
||||
_mm_storeu_si128(reinterpret_cast<__m128i *>(os_->PushUnsafe(16)), s);
|
||||
}
|
||||
|
||||
is.src_ = p;
|
||||
return RAPIDJSON_LIKELY(is.Tell() < length);
|
||||
}
|
||||
#endif // defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42)
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_RAPIDJSON_H_
|
|
@ -0,0 +1,52 @@
|
|||
Use of this software is granted under one of the following two licenses,
|
||||
to be chosen freely by the user.
|
||||
|
||||
1. Boost Software License - Version 1.0 - August 17th, 2003
|
||||
===============================================================================
|
||||
|
||||
Copyright (c) 2006, 2007 Marcin Kalicinski
|
||||
|
||||
Permission is hereby granted, free of charge, to any person or organization
|
||||
obtaining a copy of the software and accompanying documentation covered by
|
||||
this license (the "Software") to use, reproduce, display, distribute,
|
||||
execute, and transmit the Software, and to prepare derivative works of the
|
||||
Software, and to permit third-parties to whom the Software is furnished to
|
||||
do so, all subject to the following:
|
||||
|
||||
The copyright notices in the Software and this entire statement, including
|
||||
the above license grant, this restriction and the following disclaimer,
|
||||
must be included in all copies of the Software, in whole or in part, and
|
||||
all derivative works of the Software, unless such copies or derivative
|
||||
works are solely in the form of machine-executable object code generated by
|
||||
a source language processor.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
|
||||
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
|
||||
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
|
||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
|
||||
2. The MIT License
|
||||
===============================================================================
|
||||
|
||||
Copyright (c) 2006, 2007 Marcin Kalicinski
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
IN THE SOFTWARE.
|
|
@ -0,0 +1,174 @@
|
|||
#ifndef RAPIDXML_ITERATORS_HPP_INCLUDED
|
||||
#define RAPIDXML_ITERATORS_HPP_INCLUDED
|
||||
|
||||
// Copyright (C) 2006, 2009 Marcin Kalicinski
|
||||
// Version 1.13
|
||||
// Revision $DateTime: 2009/05/13 01:46:17 $
|
||||
//! \file rapidxml_iterators.hpp This file contains rapidxml iterators
|
||||
|
||||
#include "rapidxml.hpp"
|
||||
|
||||
namespace rapidxml
|
||||
{
|
||||
|
||||
//! Iterator of child nodes of xml_node
|
||||
template<class Ch>
|
||||
class node_iterator
|
||||
{
|
||||
|
||||
public:
|
||||
|
||||
        typedef xml_node<Ch> value_type;
|
||||
        typedef xml_node<Ch> &reference;
|
||||
        typedef xml_node<Ch> *pointer;
|
||||
typedef std::ptrdiff_t difference_type;
|
||||
typedef std::bidirectional_iterator_tag iterator_category;
|
||||
|
||||
node_iterator()
|
||||
: m_node(0)
|
||||
{
|
||||
}
|
||||
|
||||
node_iterator(xml_node<Ch> *node)
|
||||
: m_node(node->first_node())
|
||||
{
|
||||
}
|
||||
|
||||
reference operator *() const
|
||||
{
|
||||
assert(m_node);
|
||||
return *m_node;
|
||||
}
|
||||
|
||||
pointer operator->() const
|
||||
{
|
||||
assert(m_node);
|
||||
return m_node;
|
||||
}
|
||||
|
||||
node_iterator& operator++()
|
||||
{
|
||||
assert(m_node);
|
||||
m_node = m_node->next_sibling();
|
||||
return *this;
|
||||
}
|
||||
|
||||
node_iterator operator++(int)
|
||||
{
|
||||
node_iterator tmp = *this;
|
||||
            ++*this;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
node_iterator& operator--()
|
||||
{
|
||||
assert(m_node && m_node->previous_sibling());
|
||||
m_node = m_node->previous_sibling();
|
||||
return *this;
|
||||
}
|
||||
|
||||
node_iterator operator--(int)
|
||||
{
|
||||
node_iterator tmp = *this;
|
||||
            --*this;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
bool operator ==(const node_iterator<Ch> &rhs)
|
||||
{
|
||||
return m_node == rhs.m_node;
|
||||
}
|
||||
|
||||
bool operator !=(const node_iterator<Ch> &rhs)
|
||||
{
|
||||
return m_node != rhs.m_node;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
xml_node<Ch> *m_node;
|
||||
|
||||
};
|
||||
|
||||
//! Iterator of child attributes of xml_node
|
||||
template<class Ch>
|
||||
class attribute_iterator
|
||||
{
|
||||
|
||||
public:
|
||||
|
||||
        typedef xml_attribute<Ch> value_type;
|
||||
        typedef xml_attribute<Ch> &reference;
|
||||
        typedef xml_attribute<Ch> *pointer;
|
||||
typedef std::ptrdiff_t difference_type;
|
||||
typedef std::bidirectional_iterator_tag iterator_category;
|
||||
|
||||
attribute_iterator()
|
||||
: m_attribute(0)
|
||||
{
|
||||
}
|
||||
|
||||
attribute_iterator(xml_node<Ch> *node)
|
||||
: m_attribute(node->first_attribute())
|
||||
{
|
||||
}
|
||||
|
||||
reference operator *() const
|
||||
{
|
||||
assert(m_attribute);
|
||||
return *m_attribute;
|
||||
}
|
||||
|
||||
pointer operator->() const
|
||||
{
|
||||
assert(m_attribute);
|
||||
return m_attribute;
|
||||
}
|
||||
|
||||
attribute_iterator& operator++()
|
||||
{
|
||||
assert(m_attribute);
|
||||
m_attribute = m_attribute->next_attribute();
|
||||
return *this;
|
||||
}
|
||||
|
||||
attribute_iterator operator++(int)
|
||||
{
|
||||
attribute_iterator tmp = *this;
|
||||
            ++*this;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
attribute_iterator& operator--()
|
||||
{
|
||||
assert(m_attribute && m_attribute->previous_attribute());
|
||||
m_attribute = m_attribute->previous_attribute();
|
||||
return *this;
|
||||
}
|
||||
|
||||
attribute_iterator operator--(int)
|
||||
{
|
||||
attribute_iterator tmp = *this;
|
||||
            --*this;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
bool operator ==(const attribute_iterator<Ch> &rhs)
|
||||
{
|
||||
return m_attribute == rhs.m_attribute;
|
||||
}
|
||||
|
||||
bool operator !=(const attribute_iterator<Ch> &rhs)
|
||||
{
|
||||
return m_attribute != rhs.m_attribute;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
xml_attribute<Ch> *m_attribute;
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,425 @@
|
|||
#ifndef RAPIDXML_PRINT_HPP_INCLUDED
|
||||
#define RAPIDXML_PRINT_HPP_INCLUDED
|
||||
|
||||
// Copyright (C) 2006, 2009 Marcin Kalicinski
|
||||
// Version 1.13
|
||||
// Revision $DateTime: 2009/05/13 01:46:17 $
|
||||
//! \file rapidxml_print.hpp This file contains rapidxml printer implementation
|
||||
|
||||
#include "rapidxml.hpp"
|
||||
|
||||
// Only include streams if not disabled
|
||||
#ifndef RAPIDXML_NO_STREAMS
|
||||
#include <ostream>
|
||||
#include <iterator>
|
||||
#endif
|
||||
|
||||
namespace rapidxml
|
||||
{
|
||||
|
||||
///////////////////////////////////////////////////////////////////////
|
||||
// Printing flags
|
||||
|
||||
const int print_no_indenting = 0x1; //!< Printer flag instructing the printer to suppress indenting of XML. See print() function.
|
||||
|
||||
///////////////////////////////////////////////////////////////////////
|
||||
// Internal
|
||||
|
||||
//! \cond internal
|
||||
namespace internal
|
||||
{
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Internal character operations
|
||||
|
||||
// Copy characters from given range to given output iterator
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt copy_chars(const Ch *begin, const Ch *end, OutIt out)
|
||||
{
|
||||
while (begin != end)
|
||||
*out++ = *begin++;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Copy characters from given range to given output iterator and expand
|
||||
// characters into references (< > ' " &)
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt copy_and_expand_chars(const Ch *begin, const Ch *end, Ch noexpand, OutIt out)
|
||||
{
|
||||
while (begin != end)
|
||||
{
|
||||
if (*begin == noexpand)
|
||||
{
|
||||
*out++ = *begin; // No expansion, copy character
|
||||
}
|
||||
else
|
||||
{
|
||||
switch (*begin)
|
||||
{
|
||||
case Ch('<'):
|
||||
*out++ = Ch('&'); *out++ = Ch('l'); *out++ = Ch('t'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('>'):
|
||||
*out++ = Ch('&'); *out++ = Ch('g'); *out++ = Ch('t'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('\''):
|
||||
*out++ = Ch('&'); *out++ = Ch('a'); *out++ = Ch('p'); *out++ = Ch('o'); *out++ = Ch('s'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('"'):
|
||||
*out++ = Ch('&'); *out++ = Ch('q'); *out++ = Ch('u'); *out++ = Ch('o'); *out++ = Ch('t'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('&'):
|
||||
*out++ = Ch('&'); *out++ = Ch('a'); *out++ = Ch('m'); *out++ = Ch('p'); *out++ = Ch(';');
|
||||
break;
|
||||
default:
|
||||
*out++ = *begin; // No expansion, copy character
|
||||
}
|
||||
}
|
||||
++begin; // Step to next character
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Fill given output iterator with repetitions of the same character
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt fill_chars(OutIt out, int n, Ch ch)
|
||||
{
|
||||
for (int i = 0; i < n; ++i)
|
||||
*out++ = ch;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Find character
|
||||
template<class Ch, Ch ch>
|
||||
inline bool find_char(const Ch *begin, const Ch *end)
|
||||
{
|
||||
while (begin != end)
|
||||
if (*begin++ == ch)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Internal printing operations
|
||||
|
||||
// Print node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_node(OutIt out, const xml_node<Ch> *node, int flags, int indent);
|
||||
|
||||
// Print children of the node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_children(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
for (xml_node<Ch> *child = node->first_node(); child; child = child->next_sibling())
|
||||
out = print_node(out, child, flags, indent);
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print attributes of the node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_attributes(OutIt out, const xml_node<Ch> *node, int flags)
|
||||
{
|
||||
for (xml_attribute<Ch> *attribute = node->first_attribute(); attribute; attribute = attribute->next_attribute())
|
||||
{
|
||||
if (attribute->name() && attribute->value())
|
||||
{
|
||||
// Print attribute name
|
||||
*out = Ch(' '), ++out;
|
||||
out = copy_chars(attribute->name(), attribute->name() + attribute->name_size(), out);
|
||||
*out = Ch('='), ++out;
|
||||
// Print attribute value using appropriate quote type
|
||||
if (find_char<Ch, Ch('"')>(attribute->value(), attribute->value() + attribute->value_size()))
|
||||
{
|
||||
*out = Ch('\''), ++out;
|
||||
out = copy_and_expand_chars(attribute->value(), attribute->value() + attribute->value_size(), Ch('"'), out);
|
||||
*out = Ch('\''), ++out;
|
||||
}
|
||||
else
|
||||
{
|
||||
*out = Ch('"'), ++out;
|
||||
out = copy_and_expand_chars(attribute->value(), attribute->value() + attribute->value_size(), Ch('\''), out);
|
||||
*out = Ch('"'), ++out;
|
||||
}
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print data node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_data_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_data);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
out = copy_and_expand_chars(node->value(), node->value() + node->value_size(), Ch(0), out);
|
||||
return out;
|
||||
}
|
||||
|
||||
        // Print CDATA node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_cdata_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_cdata);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'); ++out;
|
||||
*out = Ch('!'); ++out;
|
||||
*out = Ch('['); ++out;
|
||||
*out = Ch('C'); ++out;
|
||||
*out = Ch('D'); ++out;
|
||||
*out = Ch('A'); ++out;
|
||||
*out = Ch('T'); ++out;
|
||||
*out = Ch('A'); ++out;
|
||||
*out = Ch('['); ++out;
|
||||
out = copy_chars(node->value(), node->value() + node->value_size(), out);
|
||||
*out = Ch(']'); ++out;
|
||||
*out = Ch(']'); ++out;
|
||||
*out = Ch('>'); ++out;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print element node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_element_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_element);
|
||||
|
||||
// Print element name and attributes, if any
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'), ++out;
|
||||
out = copy_chars(node->name(), node->name() + node->name_size(), out);
|
||||
out = print_attributes(out, node, flags);
|
||||
|
||||
// If node is childless
|
||||
if (node->value_size() == 0 && !node->first_node())
|
||||
{
|
||||
// Print childless node tag ending
|
||||
*out = Ch('/'), ++out;
|
||||
*out = Ch('>'), ++out;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Print normal node tag ending
|
||||
*out = Ch('>'), ++out;
|
||||
|
||||
// Test if node contains a single data node only (and no other nodes)
|
||||
xml_node<Ch> *child = node->first_node();
|
||||
if (!child)
|
||||
{
|
||||
// If node has no children, only print its value without indenting
|
||||
out = copy_and_expand_chars(node->value(), node->value() + node->value_size(), Ch(0), out);
|
||||
}
|
||||
else if (child->next_sibling() == 0 && child->type() == node_data)
|
||||
{
|
||||
// If node has a sole data child, only print its value without indenting
|
||||
out = copy_and_expand_chars(child->value(), child->value() + child->value_size(), Ch(0), out);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Print all children with full indenting
|
||||
if (!(flags & print_no_indenting))
|
||||
*out = Ch('\n'), ++out;
|
||||
out = print_children(out, node, flags, indent + 1);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
}
|
||||
|
||||
// Print node end
|
||||
*out = Ch('<'), ++out;
|
||||
*out = Ch('/'), ++out;
|
||||
out = copy_chars(node->name(), node->name() + node->name_size(), out);
|
||||
*out = Ch('>'), ++out;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print declaration node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_declaration_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
// Print declaration start
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'), ++out;
|
||||
*out = Ch('?'), ++out;
|
||||
*out = Ch('x'), ++out;
|
||||
*out = Ch('m'), ++out;
|
||||
*out = Ch('l'), ++out;
|
||||
|
||||
// Print attributes
|
||||
out = print_attributes(out, node, flags);
|
||||
|
||||
// Print declaration end
|
||||
*out = Ch('?'), ++out;
|
||||
*out = Ch('>'), ++out;
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print comment node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_comment_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_comment);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'), ++out;
|
||||
*out = Ch('!'), ++out;
|
||||
*out = Ch('-'), ++out;
|
||||
*out = Ch('-'), ++out;
|
||||
out = copy_chars(node->value(), node->value() + node->value_size(), out);
|
||||
*out = Ch('-'), ++out;
|
||||
*out = Ch('-'), ++out;
|
||||
*out = Ch('>'), ++out;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print doctype node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_doctype_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_doctype);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'), ++out;
|
||||
*out = Ch('!'), ++out;
|
||||
*out = Ch('D'), ++out;
|
||||
*out = Ch('O'), ++out;
|
||||
*out = Ch('C'), ++out;
|
||||
*out = Ch('T'), ++out;
|
||||
*out = Ch('Y'), ++out;
|
||||
*out = Ch('P'), ++out;
|
||||
*out = Ch('E'), ++out;
|
||||
*out = Ch(' '), ++out;
|
||||
out = copy_chars(node->value(), node->value() + node->value_size(), out);
|
||||
*out = Ch('>'), ++out;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print pi node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_pi_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_pi);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'), ++out;
|
||||
*out = Ch('?'), ++out;
|
||||
out = copy_chars(node->name(), node->name() + node->name_size(), out);
|
||||
*out = Ch(' '), ++out;
|
||||
out = copy_chars(node->value(), node->value() + node->value_size(), out);
|
||||
*out = Ch('?'), ++out;
|
||||
*out = Ch('>'), ++out;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
// Print proper node type
|
||||
switch (node->type())
|
||||
{
|
||||
|
||||
// Document
|
||||
case node_document:
|
||||
out = print_children(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Element
|
||||
case node_element:
|
||||
out = print_element_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Data
|
||||
case node_data:
|
||||
out = print_data_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// CDATA
|
||||
case node_cdata:
|
||||
out = print_cdata_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Declaration
|
||||
case node_declaration:
|
||||
out = print_declaration_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Comment
|
||||
case node_comment:
|
||||
out = print_comment_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Doctype
|
||||
case node_doctype:
|
||||
out = print_doctype_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Pi
|
||||
case node_pi:
|
||||
out = print_pi_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Unknown
|
||||
default:
|
||||
assert(0);
|
||||
break;
|
||||
}
|
||||
|
||||
// If indenting not disabled, add line break after node
|
||||
if (!(flags & print_no_indenting))
|
||||
*out = Ch('\n'), ++out;
|
||||
|
||||
// Return modified iterator
|
||||
return out;
|
||||
}
|
||||
|
||||
}
|
||||
//! \endcond
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Printing
|
||||
|
||||
//! Prints XML to given output iterator.
|
||||
//! \param out Output iterator to print to.
|
||||
//! \param node Node to be printed. Pass xml_document to print entire document.
|
||||
//! \param flags Flags controlling how XML is printed.
|
||||
//! \return Output iterator pointing to position immediately after last character of printed text.
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print(OutIt out, const xml_node<Ch> &node, int flags = 0)
|
||||
{
|
||||
return internal::print_node(out, &node, flags, 0);
|
||||
}
|
||||
|
||||
#ifndef RAPIDXML_NO_STREAMS
|
||||
|
||||
//! Prints XML to given output stream.
|
||||
//! \param out Output stream to print to.
|
||||
//! \param node Node to be printed. Pass xml_document to print entire document.
|
||||
//! \param flags Flags controlling how XML is printed.
|
||||
//! \return Output stream.
|
||||
template<class Ch>
|
||||
inline std::basic_ostream<Ch> &print(std::basic_ostream<Ch> &out, const xml_node<Ch> &node, int flags = 0)
|
||||
{
|
||||
print(std::ostream_iterator<Ch>(out), node, flags);
|
||||
return out;
|
||||
}
|
||||
|
||||
//! Prints formatted XML to given output stream. Uses default printing flags. Use print() function to customize printing process.
|
||||
//! \param out Output stream to print to.
|
||||
//! \param node Node to be printed.
|
||||
//! \return Output stream.
|
||||
template<class Ch>
|
||||
inline std::basic_ostream<Ch> &operator <<(std::basic_ostream<Ch> &out, const xml_node<Ch> &node)
|
||||
{
|
||||
return print(out, node);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
#endif
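
For reference, a minimal sketch of how the print() API added above can be used; the XML literal, function name, and flag choice are illustrative only and not part of this change:

    #include <string>
    #include <iterator>
    #include "rapidxml/rapidxml.hpp"
    #include "rapidxml/rapidxml_print.hpp"

    std::string print_example()
    {
        // Parse a small document in place (the buffer must be writable and outlive doc).
        char xml[] = "<root attr=\"1\"><child>text</child></root>";
        rapidxml::xml_document<> doc;
        doc.parse<0>(xml);

        // Print the whole document to a string; pass rapidxml::print_no_indenting for flat output.
        std::string out;
        rapidxml::print(std::back_inserter(out), doc, 0);
        return out;
    }

The stream operator overload above allows the same thing directly with an std::ostream, e.g. std::cout << doc.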
|
|
@ -0,0 +1,122 @@
|
|||
#ifndef RAPIDXML_UTILS_HPP_INCLUDED
|
||||
#define RAPIDXML_UTILS_HPP_INCLUDED
|
||||
|
||||
// Copyright (C) 2006, 2009 Marcin Kalicinski
|
||||
// Version 1.13
|
||||
// Revision $DateTime: 2009/05/13 01:46:17 $
|
||||
//! \file rapidxml_utils.hpp This file contains high-level rapidxml utilities that can be useful
|
||||
//! in certain simple scenarios. They should probably not be used if maximizing performance is the main objective.
|
||||
|
||||
#include "rapidxml.hpp"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace rapidxml
|
||||
{
|
||||
|
||||
//! Represents data loaded from a file
|
||||
template<class Ch = char>
|
||||
class file
|
||||
{
|
||||
|
||||
public:
|
||||
|
||||
//! Loads file into the memory. Data will be automatically destroyed by the destructor.
|
||||
//! \param filename Filename to load.
|
||||
file(const char *filename)
|
||||
{
|
||||
using namespace std;
|
||||
|
||||
// Open stream
|
||||
basic_ifstream<Ch> stream(filename, ios::binary);
|
||||
if (!stream)
|
||||
throw runtime_error(string("cannot open file ") + filename);
|
||||
stream.unsetf(ios::skipws);
|
||||
|
||||
// Determine stream size
|
||||
stream.seekg(0, ios::end);
|
||||
size_t size = stream.tellg();
|
||||
stream.seekg(0);
|
||||
|
||||
// Load data and add terminating 0
|
||||
m_data.resize(size + 1);
|
||||
stream.read(&m_data.front(), static_cast<streamsize>(size));
|
||||
m_data[size] = 0;
|
||||
}
|
||||
|
||||
//! Loads file into the memory. Data will be automatically destroyed by the destructor
|
||||
//! \param stream Stream to load from
|
||||
file(std::basic_istream<Ch> &stream)
|
||||
{
|
||||
using namespace std;
|
||||
|
||||
// Load data and add terminating 0
|
||||
stream.unsetf(ios::skipws);
|
||||
m_data.assign(istreambuf_iterator<Ch>(stream), istreambuf_iterator<Ch>());
|
||||
if (stream.fail() || stream.bad())
|
||||
throw runtime_error("error reading stream");
|
||||
m_data.push_back(0);
|
||||
}
|
||||
|
||||
//! Gets file data.
|
||||
//! \return Pointer to data of file.
|
||||
Ch *data()
|
||||
{
|
||||
return &m_data.front();
|
||||
}
|
||||
|
||||
//! Gets file data.
|
||||
//! \return Pointer to data of file.
|
||||
const Ch *data() const
|
||||
{
|
||||
return &m_data.front();
|
||||
}
|
||||
|
||||
//! Gets file data size.
|
||||
//! \return Size of file data, in characters.
|
||||
std::size_t size() const
|
||||
{
|
||||
return m_data.size();
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
std::vector<Ch> m_data; // File data
|
||||
|
||||
};
|
||||
|
||||
//! Counts children of node. Time complexity is O(n).
|
||||
//! \return Number of children of node
|
||||
template<class Ch>
|
||||
inline std::size_t count_children(xml_node<Ch> *node)
|
||||
{
|
||||
xml_node<Ch> *child = node->first_node();
|
||||
std::size_t count = 0;
|
||||
while (child)
|
||||
{
|
||||
++count;
|
||||
child = child->next_sibling();
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
//! Counts attributes of node. Time complexity is O(n).
|
||||
//! \return Number of attributes of node
|
||||
template<class Ch>
|
||||
inline std::size_t count_attributes(xml_node<Ch> *node)
|
||||
{
|
||||
xml_attribute<Ch> *attr = node->first_attribute();
|
||||
std::size_t count = 0;
|
||||
while (attr)
|
||||
{
|
||||
++count;
|
||||
attr = attr->next_attribute();
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
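
For reference, a minimal sketch using the file helper and count_children() defined above; the filename and function name are placeholders:

    #include <iostream>
    #include "rapidxml/rapidxml.hpp"
    #include "rapidxml/rapidxml_utils.hpp"

    void count_top_level(const char *filename)
    {
        rapidxml::file<> f(filename);   // reads the whole file and appends a terminating 0
        rapidxml::xml_document<> doc;
        doc.parse<0>(f.data());         // in-place parse; f must outlive doc
        std::cout << "top-level nodes: " << rapidxml::count_children(&doc) << std::endl;
    }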
|
|
@ -786,6 +786,9 @@ public:
|
|||
((Sim2Listener*)peerp->listener.getPtr())->incomingConnection( 0.5*g_random->random01(), Reference<IConnection>(peerc) );
|
||||
return onConnect( ::delay(0.5*g_random->random01()), myc );
|
||||
}
|
||||
virtual Future<std::vector<NetworkAddress>> resolveTCPEndpoint( std::string host, std::string service) {
|
||||
throw lookup_failed();
|
||||
}
|
||||
ACTOR static Future<Reference<IConnection>> onConnect( Future<Void> ready, Reference<Sim2Conn> conn ) {
|
||||
Void _ = wait(ready);
|
||||
if (conn->isPeerGone() && g_random->random01()<0.5) {
|
||||
|
|
|
@ -0,0 +1,273 @@
|
|||
#ifndef XML2JSON_HPP_INCLUDED
|
||||
#define XML2JSON_HPP_INCLUDED
|
||||
|
||||
// Copyright (C) 2015 Alan Zhuang (Cheedoong) HKUST. [Updated to the latest version of rapidjson]
|
||||
// Copyright (C) 2013 Alan Zhuang (Cheedoong) Tencent, Inc.
|
||||
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <cctype>
|
||||
|
||||
#include "rapidxml/rapidxml.hpp"
|
||||
#include "rapidxml/rapidxml_utils.hpp"
|
||||
#include "rapidxml/rapidxml_print.hpp"
|
||||
|
||||
#include "rapidjson/document.h"
|
||||
#include "rapidjson/prettywriter.h"
|
||||
#include "rapidjson/encodedstream.h"
|
||||
#include "rapidjson/stringbuffer.h"
|
||||
#include "rapidjson/reader.h"
|
||||
#include "rapidjson/writer.h"
|
||||
#include "rapidjson/filereadstream.h"
|
||||
#include "rapidjson/filewritestream.h"
|
||||
#include "rapidjson/error/en.h"
|
||||
|
||||
/* [Start] This part is configurable */
|
||||
static const char xml2json_text_additional_name[] = "#text";
|
||||
static const char xml2json_attribute_name_prefix[] = "@";
|
||||
/* Example:
|
||||
<node_name attribute_name="attribute_value">value</node_name> ---> "node_name":{"#text":"value","@attribute_name":"attribute_value"}
|
||||
*/
|
||||
static const bool xml2json_numeric_support = false;
|
||||
/* Example:
|
||||
xml2json_numeric_support = false:
|
||||
<number>26.026</number> ---> "number":"26.026"
|
||||
xml2json_numeric_support = true:
|
||||
<number>26.026</number> ---> "number":26.026
|
||||
*/
|
||||
/* [End] This part is configurable */
|
||||
|
||||
// Avoid any namespace pollution.
|
||||
static bool xml2json_has_digits_only(const char * input, bool *hasDecimal)
|
||||
{
|
||||
if (input == nullptr)
|
||||
return false; // treat empty input as a string (probably will be an empty string)
|
||||
|
||||
const char * runPtr = input;
|
||||
|
||||
*hasDecimal = false;
|
||||
|
||||
while (*runPtr != '\0')
|
||||
{
|
||||
if (*runPtr == '.')
|
||||
{
|
||||
if (!(*hasDecimal))
|
||||
*hasDecimal = true;
|
||||
else
|
||||
return false; // we found two dots - not a number
|
||||
}
|
||||
else if (isalpha(*runPtr))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
runPtr++;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void xml2json_to_array_form(const char *name, rapidjson::Value &jsvalue, rapidjson::Value &jsvalue_chd, rapidjson::Document::AllocatorType& allocator)
|
||||
{
|
||||
rapidjson::Value jsvalue_target; // target to do some operation
|
||||
rapidjson::Value jn; // this is a must, partially because of the latest version of rapidjson
|
||||
jn.SetString(name, allocator);
|
||||
jsvalue_target = jsvalue.FindMember(name)->value;
|
||||
if(jsvalue_target.IsArray())
|
||||
{
|
||||
jsvalue_target.PushBack(jsvalue_chd, allocator);
|
||||
jsvalue.RemoveMember(name);
|
||||
jsvalue.AddMember(jn, jsvalue_target, allocator);
|
||||
}
|
||||
else
|
||||
{
|
||||
rapidjson::Value jsvalue_array;
|
||||
//jsvalue_array = jsvalue_target;
|
||||
jsvalue_array.SetArray();
|
||||
jsvalue_array.PushBack(jsvalue_target, allocator);
|
||||
jsvalue_array.PushBack(jsvalue_chd, allocator);
|
||||
jsvalue.RemoveMember(name);
|
||||
jsvalue.AddMember(jn, jsvalue_array, allocator);
|
||||
}
|
||||
}
|
||||
|
||||
void xml2json_add_attributes(rapidxml::xml_node<> *xmlnode, rapidjson::Value &jsvalue, rapidjson::Document::AllocatorType& allocator)
|
||||
{
|
||||
rapidxml::xml_attribute<> *myattr;
|
||||
for(myattr = xmlnode->first_attribute(); myattr; myattr = myattr->next_attribute())
|
||||
{
|
||||
rapidjson::Value jn, jv;
|
||||
jn.SetString((std::string(xml2json_attribute_name_prefix) + myattr->name()).c_str(), allocator);
|
||||
|
||||
if (xml2json_numeric_support == false)
|
||||
{
|
||||
jv.SetString(myattr->value(), allocator);
|
||||
}
|
||||
else
|
||||
{
|
||||
bool hasDecimal;
|
||||
if (xml2json_has_digits_only(myattr->value(), &hasDecimal) == false)
|
||||
{
|
||||
jv.SetString(myattr->value(), allocator);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (hasDecimal)
|
||||
{
|
||||
double value = std::strtod(myattr->value(),nullptr);
|
||||
jv.SetDouble(value);
|
||||
}
|
||||
else
|
||||
{
|
||||
long int value = std::strtol(myattr->value(), nullptr, 0);
|
||||
jv.SetInt(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
jsvalue.AddMember(jn, jv, allocator);
|
||||
}
|
||||
}
|
||||
|
||||
void xml2json_traverse_node(rapidxml::xml_node<> *xmlnode, rapidjson::Value &jsvalue, rapidjson::Document::AllocatorType& allocator)
|
||||
{
|
||||
//cout << "this: " << xmlnode->type() << " name: " << xmlnode->name() << " value: " << xmlnode->value() << endl;
|
||||
rapidjson::Value jsvalue_chd;
|
||||
|
||||
jsvalue.SetObject();
|
||||
jsvalue_chd.SetObject();
|
||||
rapidxml::xml_node<> *xmlnode_chd;
|
||||
|
||||
    // Convert based on the node type:
|
||||
if((xmlnode->type() == rapidxml::node_data || xmlnode->type() == rapidxml::node_cdata) && xmlnode->value())
|
||||
{
|
||||
// case: pure_text
|
||||
jsvalue.SetString(xmlnode->value(), allocator); // then addmember("#text" , jsvalue, allocator)
|
||||
}
|
||||
else if(xmlnode->type() == rapidxml::node_element)
|
||||
{
|
||||
if(xmlnode->first_attribute())
|
||||
{
|
||||
if(xmlnode->first_node() && xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1)
|
||||
{
|
||||
// case: <e attr="xxx">text</e>
|
||||
rapidjson::Value jn, jv;
|
||||
jn.SetString(xml2json_text_additional_name, allocator);
|
||||
jv.SetString(xmlnode->first_node()->value(), allocator);
|
||||
jsvalue.AddMember(jn, jv, allocator);
|
||||
xml2json_add_attributes(xmlnode, jsvalue, allocator);
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
// case: <e attr="xxx">...</e>
|
||||
xml2json_add_attributes(xmlnode, jsvalue, allocator);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if(!xmlnode->first_node())
|
||||
{
|
||||
// case: <e />
|
||||
jsvalue.SetNull();
|
||||
return;
|
||||
}
|
||||
else if(xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1)
|
||||
{
|
||||
// case: <e>text</e>
|
||||
if (xml2json_numeric_support == false)
|
||||
{
|
||||
jsvalue.SetString(rapidjson::StringRef(xmlnode->first_node()->value()), allocator);
|
||||
}
|
||||
else
|
||||
{
|
||||
bool hasDecimal;
|
||||
if (xml2json_has_digits_only(xmlnode->first_node()->value(), &hasDecimal) == false)
|
||||
{
|
||||
jsvalue.SetString(rapidjson::StringRef(xmlnode->first_node()->value()), allocator);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (hasDecimal)
|
||||
{
|
||||
double value = std::strtod(xmlnode->first_node()->value(), nullptr);
|
||||
jsvalue.SetDouble(value);
|
||||
}
|
||||
else
|
||||
{
|
||||
long int value = std::strtol(xmlnode->first_node()->value(), nullptr, 0);
|
||||
jsvalue.SetInt(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
if(xmlnode->first_node())
|
||||
{
|
||||
// case: complex else...
|
||||
std::map<std::string, int> name_count;
|
||||
for(xmlnode_chd = xmlnode->first_node(); xmlnode_chd; xmlnode_chd = xmlnode_chd->next_sibling())
|
||||
{
|
||||
std::string current_name;
|
||||
const char *name_ptr = NULL;
|
||||
rapidjson::Value jn, jv;
|
||||
if(xmlnode_chd->type() == rapidxml::node_data || xmlnode_chd->type() == rapidxml::node_cdata)
|
||||
{
|
||||
current_name = xml2json_text_additional_name;
|
||||
name_count[current_name]++;
|
||||
jv.SetString(xml2json_text_additional_name, allocator);
|
||||
name_ptr = jv.GetString();
|
||||
}
|
||||
else if(xmlnode_chd->type() == rapidxml::node_element)
|
||||
{
|
||||
current_name = xmlnode_chd->name();
|
||||
name_count[current_name]++;
|
||||
name_ptr = xmlnode_chd->name();
|
||||
}
|
||||
xml2json_traverse_node(xmlnode_chd, jsvalue_chd, allocator);
|
||||
if(name_count[current_name] > 1 && name_ptr)
|
||||
xml2json_to_array_form(name_ptr, jsvalue, jsvalue_chd, allocator);
|
||||
else
|
||||
{
|
||||
jn.SetString(name_ptr, allocator);
|
||||
jsvalue.AddMember(jn, jsvalue_chd, allocator);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cerr << "err data!!" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
std::string xml2json(const char *xml_str)
|
||||
{
|
||||
//file<> fdoc("track_orig.xml"); // could serve another use case
|
||||
rapidxml::xml_document<> *xml_doc = new rapidxml::xml_document<>();
|
||||
xml_doc->parse<0> (const_cast<char *>(xml_str));
|
||||
|
||||
rapidjson::Document js_doc;
|
||||
js_doc.SetObject();
|
||||
rapidjson::Document::AllocatorType& allocator = js_doc.GetAllocator();
|
||||
|
||||
rapidxml::xml_node<> *xmlnode_chd;
|
||||
|
||||
for(xmlnode_chd = xml_doc->first_node(); xmlnode_chd; xmlnode_chd = xmlnode_chd->next_sibling())
|
||||
{
|
||||
rapidjson::Value jsvalue_chd;
|
||||
jsvalue_chd.SetObject();
|
||||
//rapidjson::Value jsvalue_name(xmlnode_chd->name(), allocator);
|
||||
//js_doc.AddMember(jsvalue_name, jsvalue_chd, allocator);
|
||||
xml2json_traverse_node(xmlnode_chd, jsvalue_chd, allocator);
|
||||
js_doc.AddMember(rapidjson::StringRef(xmlnode_chd->name()), jsvalue_chd, allocator);
|
||||
}
|
||||
|
||||
rapidjson::StringBuffer buffer;
|
||||
rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
|
||||
js_doc.Accept(writer);
|
||||
delete xml_doc;
|
||||
return buffer.GetString();
|
||||
}
|
||||
|
||||
#endif
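
For reference, a minimal sketch of calling xml2json() with the defaults configured above; the XML literal is a placeholder and the commented output shows the expected shape:

    #include <iostream>
    #include <string>
    #include "xml2json.hpp"

    int main()
    {
        // xml2json() parses the buffer in place (via const_cast), so hand it writable
        // data rather than a string literal.
        std::string xml = "<track><title>abc</title><length seconds=\"193\">3:13</length></track>";
        std::cout << xml2json(xml.c_str()) << std::endl;
        // With the defaults above this prints:
        // {"track":{"title":"abc","length":{"#text":"3:13","@seconds":"193"}}}
        return 0;
    }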
|
|
@ -67,7 +67,7 @@ struct AtomicRestoreWorkload : TestWorkload {
|
|||
|
||||
state std::string backupContainer = "file://simfdb/backups/";
|
||||
try {
|
||||
Void _ = wait(backupAgent.submitBackup(cx, StringRef(backupContainer), BackupAgentBase::getDefaultTagName(), self->backupRanges, false));
|
||||
Void _ = wait(backupAgent.submitBackup(cx, StringRef(backupContainer), g_random->randomInt(0, 100), BackupAgentBase::getDefaultTagName(), self->backupRanges, false));
|
||||
}
|
||||
catch (Error& e) {
|
||||
if (e.code() != error_code_backup_unneeded && e.code() != error_code_backup_duplicate)
|
||||
|
|
|
@ -138,7 +138,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
|
|||
|
||||
state std::string backupContainer = "file://simfdb/backups/";
|
||||
try {
|
||||
Void _ = wait(backupAgent->submitBackup(cx, StringRef(backupContainer), tag.toString(), backupRanges, stopDifferentialDelay ? false : true));
|
||||
Void _ = wait(backupAgent->submitBackup(cx, StringRef(backupContainer), g_random->randomInt(0, 100), tag.toString(), backupRanges, stopDifferentialDelay ? false : true));
|
||||
}
|
||||
catch (Error& e) {
|
||||
TraceEvent("BARW_doBackupSubmitBackupException", randomID).detail("tag", printable(tag)).error(e);
|
||||
|
@ -162,36 +162,42 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
|
|||
state int resultWait = wait(backupAgent->waitBackup(cx, backupTag.tagName, false));
|
||||
UidAndAbortedFlagT uidFlag = wait(backupTag.getOrThrow(cx));
|
||||
state UID logUid = uidFlag.first;
|
||||
state std::string lastBackupContainer = wait(BackupConfig(logUid).backupContainer().getOrThrow(cx, false, backup_unneeded()));
|
||||
state Reference<IBackupContainer> lastBackupContainer = wait(BackupConfig(logUid).backupContainer().getD(cx));
|
||||
|
||||
state std::string restorableFile = joinPath(lastBackupContainer, "restorable");
|
||||
TraceEvent("BARW_lastBackupContainer", randomID).detail("backupTag", printable(tag)).detail("lastBackupContainer", lastBackupContainer)
|
||||
.detail("logUid", logUid).detail("waitStatus", resultWait).detail("restorable", restorableFile);
|
||||
state bool restorable = false;
|
||||
if(lastBackupContainer) {
|
||||
BackupDescription desc = wait(lastBackupContainer->describeBackup());
|
||||
restorable = desc.maxRestorableVersion.present();
|
||||
}
|
||||
|
||||
TraceEvent("BARW_lastBackupContainer", randomID)
|
||||
.detail("backupTag", printable(tag))
|
||||
.detail("lastBackupContainer", lastBackupContainer ? lastBackupContainer->getURL() : "")
|
||||
.detail("logUid", logUid).detail("waitStatus", resultWait).detail("restorable", restorable);
|
||||
|
||||
// Do not check the backup, if aborted
|
||||
if (resultWait == BackupAgentBase::STATE_ABORTED) {
|
||||
}
|
||||
|
||||
// Ensure that a backup container was found
|
||||
else if (lastBackupContainer.empty()) {
|
||||
else if (!lastBackupContainer) {
|
||||
TraceEvent("BARW_missingBackupContainer", randomID).detail("logUid", logUid).detail("backupTag", printable(tag)).detail("waitStatus", resultWait);
|
||||
printf("BackupCorrectnessMissingBackupContainer tag: %s status: %d\n", printable(tag).c_str(), resultWait);
|
||||
}
|
||||
|
||||
// Ensure that the restorable file is present
|
||||
// Check that backup is restorable
|
||||
else {
|
||||
bool rfExists = wait(IBackupContainer::openContainer(lastBackupContainer)->fileExists(restorableFile));
|
||||
if(!rfExists) {
|
||||
TraceEvent("BARW_missingBackupRestoreFile", randomID).detail("logUid", logUid).detail("backupTag", printable(tag))
|
||||
.detail("backupFolder", lastBackupContainer).detail("restorable", restorableFile).detail("waitStatus", resultWait);
|
||||
printf("BackupCorrectnessMissingRestorable: %s tag: %s\n", restorableFile.c_str(), printable(tag).c_str());
|
||||
if(!restorable) {
|
||||
TraceEvent("BARW_notRestorable", randomID).detail("logUid", logUid).detail("backupTag", printable(tag))
|
||||
.detail("backupFolder", lastBackupContainer->getURL()).detail("waitStatus", resultWait);
|
||||
printf("BackupCorrectnessNotRestorable: tag: %s\n", printable(tag).c_str());
|
||||
}
|
||||
}
|
||||
|
||||
// Abort the backup, if not the first backup because the second backup may have aborted the backup by now
|
||||
if (startDelay) {
|
||||
TraceEvent("BARW_doBackupAbortBackup2", randomID).detail("tag", printable(tag))
|
||||
.detail("waitStatus", resultWait).detail("lastBackupContainer", lastBackupContainer).detail("restorable", restorableFile);
|
||||
.detail("waitStatus", resultWait)
|
||||
.detail("lastBackupContainer", lastBackupContainer ? lastBackupContainer->getURL() : "")
|
||||
.detail("restorable", restorable);
|
||||
Void _ = wait(backupAgent->abortBackup(cx, tag.toString()));
|
||||
}
|
||||
else {
|
||||
|
@ -309,13 +315,13 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
|
|||
state KeyBackedTag keyBackedTag = makeBackupTag(self->backupTag.toString());
|
||||
UidAndAbortedFlagT uidFlag = wait(keyBackedTag.getOrThrow(cx));
|
||||
state UID logUid = uidFlag.first;
|
||||
state std::string lastBackupContainer = wait(BackupConfig(logUid).backupContainer().getOrThrow(cx));
|
||||
state Reference<IBackupContainer> lastBackupContainer = wait(BackupConfig(logUid).backupContainer().getD(cx));
|
||||
|
||||
// Occasionally start yet another backup that might still be running when we restore
|
||||
if (!self->locked && BUGGIFY) {
|
||||
TraceEvent("BARW_submitBackup2", randomID).detail("tag", printable(self->backupTag));
|
||||
try {
|
||||
extraBackup = backupAgent.submitBackup(cx, LiteralStringRef("file://simfdb/backups/"), self->backupTag.toString(), self->backupRanges, true);
|
||||
extraBackup = backupAgent.submitBackup(cx, LiteralStringRef("file://simfdb/backups/"), g_random->randomInt(0, 100), self->backupTag.toString(), self->backupRanges, true);
|
||||
}
|
||||
catch (Error& e) {
|
||||
TraceEvent("BARW_submitBackup2Exception", randomID).detail("backupTag", printable(self->backupTag)).error(e);
|
||||
|
@ -327,9 +333,9 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
|
|||
TEST(!startRestore.isReady()); //Restore starts at specified time
|
||||
Void _ = wait(startRestore);
|
||||
|
||||
if ((lastBackupContainer.size()) && (self->performRestore)) {
|
||||
if (lastBackupContainer && self->performRestore) {
|
||||
if (g_random->random01() < 0.5) {
|
||||
Void _ = wait(attemptDirtyRestore(self, cx, &backupAgent, StringRef(lastBackupContainer), randomID));
|
||||
Void _ = wait(attemptDirtyRestore(self, cx, &backupAgent, StringRef(lastBackupContainer->getURL()), randomID));
|
||||
}
|
||||
Void _ = wait(runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) -> Future<Void> {
|
||||
for (auto &kvrange : self->backupRanges)
|
||||
|
@ -338,7 +344,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
|
|||
}));
|
||||
|
||||
// restore database
|
||||
TraceEvent("BARW_restore", randomID).detail("lastBackupContainer", lastBackupContainer).detail("restoreAfter", self->restoreAfter).detail("backupTag", printable(self->backupTag));
|
||||
TraceEvent("BARW_restore", randomID).detail("lastBackupContainer", lastBackupContainer->getURL()).detail("restoreAfter", self->restoreAfter).detail("backupTag", printable(self->backupTag));
|
||||
|
||||
state std::vector<Future<Version>> restores;
|
||||
state std::vector<Standalone<StringRef>> restoreTags;
|
||||
|
@ -348,7 +354,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
|
|||
auto range = self->backupRanges[restoreIndex];
|
||||
Standalone<StringRef> restoreTag(self->backupTag.toString() + "_" + std::to_string(restoreIndex));
|
||||
restoreTags.push_back(restoreTag);
|
||||
restores.push_back(backupAgent.restore(cx, restoreTag, KeyRef(lastBackupContainer), true, -1, true, range, Key(), Key(), self->locked));
|
||||
restores.push_back(backupAgent.restore(cx, restoreTag, KeyRef(lastBackupContainer->getURL()), true, -1, true, range, Key(), Key(), self->locked));
|
||||
}
|
||||
|
||||
// Sometimes kill and restart the restore
|
||||
|
@ -363,7 +369,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
|
|||
tr->clear(self->backupRanges[restoreIndex]);
|
||||
return Void();
|
||||
}));
|
||||
restores[restoreIndex] = backupAgent.restore(cx, restoreTags[restoreIndex], KeyRef(lastBackupContainer), true, -1, true, self->backupRanges[restoreIndex], Key(), Key(), self->locked);
|
||||
restores[restoreIndex] = backupAgent.restore(cx, restoreTags[restoreIndex], KeyRef(lastBackupContainer->getURL()), true, -1, true, self->backupRanges[restoreIndex], Key(), Key(), self->locked);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -137,28 +137,28 @@ struct CycleWorkload : TestWorkload {
|
|||
}
|
||||
bool cycleCheckData( const VectorRef<KeyValueRef>& data, Version v ) {
|
||||
if (data.size() != nodeCount) {
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Node count changed").detail("Before", nodeCount).detail("After", data.size()).detail("Version", v);
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Node count changed").detail("Before", nodeCount).detail("After", data.size()).detail("Version", v).detail("KeyPrefix", keyPrefix.printable());
|
||||
return false;
|
||||
}
|
||||
int i=0;
|
||||
for(int c=0; c<nodeCount; c++) {
|
||||
if (c && !i) {
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Cycle got shorter");
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Cycle got shorter").detail("Before", nodeCount).detail("After", c).detail("KeyPrefix", keyPrefix.printable());
|
||||
return false;
|
||||
}
|
||||
if (data[i].key != key(i)) {
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Key changed");
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Key changed").detail("KeyPrefix", keyPrefix.printable());
|
||||
return false;
|
||||
}
|
||||
double d = testKeyToDouble(data[i].value, keyPrefix);
|
||||
i = (int)d;
|
||||
if ( i != d || i<0 || i>=nodeCount) {
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Invalid value");
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Invalid value").detail("KeyPrefix", keyPrefix.printable());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (i != 0) {
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Cycle got longer");
|
||||
TraceEvent(SevError, "TestFailure").detail("Reason", "Cycle got longer").detail("KeyPrefix", keyPrefix.printable());
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
|
|
@ -36,7 +36,7 @@ struct DiskDurabilityWorkload : public AsyncFileWorkload
|
|||
Reference<FlowLock> lock;
|
||||
|
||||
ACTOR static Future<Void> test_impl(FileBlock *self, Reference<AsyncFileHandle> file, int pages, Reference<AsyncFileBuffer> buffer) {
|
||||
Void _ = wait(self->lock->take(1));
|
||||
Void _ = wait(self->lock->take());
|
||||
|
||||
state int64_t offset = (int64_t)self->blockNum * pages * _PAGE_SIZE;
|
||||
state int size = pages * _PAGE_SIZE;
|
||||
|
|
|
@ -123,6 +123,7 @@ public:
|
|||
|
||||
// INetworkConnections interface
|
||||
virtual Future<Reference<IConnection>> connect( NetworkAddress toAddr );
|
||||
virtual Future<std::vector<NetworkAddress>> resolveTCPEndpoint( std::string host, std::string service);
|
||||
virtual Reference<IListener> listen( NetworkAddress localAddr );
|
||||
|
||||
// INetwork interface
|
||||
|
@ -157,6 +158,7 @@ public:
|
|||
|
||||
ASIOReactor reactor;
|
||||
INetworkConnections *network; // initially this, but can be changed
|
||||
tcp::resolver tcpResolver;
|
||||
|
||||
int64_t tsc_begin, tsc_end;
|
||||
double taskBegin;
|
||||
|
@ -475,6 +477,7 @@ Net2::Net2(NetworkAddress localAddress, bool useThreadPool, bool useMetrics)
|
|||
: useThreadPool(useThreadPool),
|
||||
network(this),
|
||||
reactor(this),
|
||||
tcpResolver(reactor.ios),
|
||||
stopped(false),
|
||||
tasksIssued(0),
|
||||
// Until run() is called, yield() will always yield
|
||||
|
@ -828,6 +831,34 @@ Future< Reference<IConnection> > Net2::connect( NetworkAddress toAddr ) {
|
|||
return Connection::connect(&this->reactor.ios, toAddr);
|
||||
}
|
||||
|
||||
ACTOR static Future<std::vector<NetworkAddress>> resolveTCPEndpoint_impl( Net2 *self, std::string host, std::string service) {
|
||||
state Promise<std::vector<NetworkAddress>> result;
|
||||
|
||||
self->tcpResolver.async_resolve(tcp::resolver::query(host, service), [=](const boost::system::error_code &ec, tcp::resolver::iterator iter) {
|
||||
		if(ec) {
|
||||
			result.sendError(lookup_failed());
			return;
		}
|
||||
std::vector<NetworkAddress> addrs;
|
||||
|
||||
tcp::resolver::iterator end;
|
||||
while(iter != end) {
|
||||
// The easiest way to get an ip:port formatted endpoint with this interface is with a string stream because
|
||||
// endpoint::to_string doesn't exist but operator<< does.
|
||||
std::stringstream s;
|
||||
s << iter->endpoint();
|
||||
addrs.push_back(NetworkAddress::parse(s.str()));
|
||||
++iter;
|
||||
}
|
||||
result.send(addrs);
|
||||
});
|
||||
|
||||
std::vector<NetworkAddress> addresses = wait(result.getFuture());
|
||||
return addresses;
|
||||
}
|
||||
|
||||
Future<std::vector<NetworkAddress>> Net2::resolveTCPEndpoint( std::string host, std::string service) {
|
||||
return resolveTCPEndpoint_impl(this, host, service);
|
||||
}
|
||||
|
||||
bool Net2::isAddressOnThisHost( NetworkAddress const& addr ) {
|
||||
auto it = addressOnHostCache.find( addr.ip );
|
||||
if (it != addressOnHostCache.end())
|
||||
|
|
|
@ -1921,6 +1921,21 @@ std::vector<std::string> listFiles( std::string const& directory, std::string co
|
|||
std::vector<std::string> listDirectories( std::string const& directory ) {
|
||||
return findFiles( directory, "", &acceptDirectory );
|
||||
}
|
||||
|
||||
void findFilesRecursively(std::string path, std::vector<std::string> &out) {
|
||||
// Add files to output, prefixing path
|
||||
std::vector<std::string> files = platform::listFiles(path);
|
||||
for(auto const &f : files)
|
||||
out.push_back(joinPath(path, f));
|
||||
|
||||
// Recurse for directories
|
||||
std::vector<std::string> directories = platform::listDirectories(path);
|
||||
for(auto const &dir : directories) {
|
||||
if(dir != "." && dir != "..")
|
||||
findFilesRecursively(joinPath(path, dir), out);
|
||||
}
|
||||
};
|
||||
|
||||
}; // namespace platform
|
||||
|
||||
|
||||
|
|
|
@ -330,6 +330,8 @@ std::vector<std::string> listFiles( std::string const& directory, std::string co
|
|||
// returns directory names relative to directory
|
||||
std::vector<std::string> listDirectories( std::string const& directory );
|
||||
|
||||
void findFilesRecursively(std::string path, std::vector<std::string> &out);
|
||||
|
||||
// Tag the given file as "temporary", i.e. not really needing commits to disk
|
||||
void makeTemporary( const char* filename );
|
||||
|
||||
|
|
|
@ -61,6 +61,7 @@ ERROR( process_behind, 1037, "Storage process does not have recent mutations" )
|
|||
ERROR( database_locked, 1038, "Database is locked" )
|
||||
ERROR( cluster_version_changed, 1039, "Cluster has been upgraded to a new protocol version" )
|
||||
ERROR( external_client_already_loaded, 1040, "External client has already been loaded" )
|
||||
ERROR( lookup_failed, 1041, "DNS lookup failed" )
|
||||
|
||||
ERROR( broken_promise, 1100, "Broken promise" )
|
||||
ERROR( operation_cancelled, 1101, "Asynchronous operation cancelled" )
|
||||
|
@ -168,18 +169,21 @@ ERROR( restore_error, 2301, "Restore error")
|
|||
ERROR( backup_duplicate, 2311, "Backup duplicate request")
|
||||
ERROR( backup_unneeded, 2312, "Backup unneeded request")
|
||||
ERROR( backup_bad_block_size, 2313, "Backup file block size too small")
|
||||
ERROR( backup_invalid_url, 2314, "Backup Container URL invalid")
|
||||
ERROR( backup_invalid_info, 2315, "Backup Container info invalid")
|
||||
ERROR( restore_invalid_version, 2361, "Invalid restore version")
|
||||
ERROR( restore_corrupted_data, 2362, "Corrupted backup data")
|
||||
ERROR( restore_missing_data, 2363, "Missing backup data")
|
||||
ERROR( restore_duplicate_tag, 2364, "Restore duplicate request")
|
||||
ERROR( restore_unknown_tag, 2365, "Restore tag does not exist")
|
||||
ERROR( restore_unknown_file_type, 2366, "Unknown backup file type")
|
||||
ERROR( restore_unknown_file_type, 2366, "Unknown backup/restore file type")
|
||||
ERROR( restore_unsupported_file_version, 2367, "Unsupported backup file version")
|
||||
ERROR( restore_bad_read, 2368, "Unexpected number of bytes read")
|
||||
ERROR( restore_corrupted_data_padding, 2369, "Backup file has unexpected padding bytes")
|
||||
ERROR( restore_destination_not_empty, 2370, "Attempted to restore into a non-empty destination database")
|
||||
ERROR( restore_duplicate_uid, 2371, "Attempted to restore using a UID that had been used for an aborted restore")
|
||||
ERROR( task_invalid_version, 2381, "Invalid task version")
|
||||
ERROR( task_interrupted, 2382, "Task execution stopped due to timeout, abort, or completion by another worker")
|
||||
|
||||
ERROR( key_not_found, 2400, "Expected key is missing")
|
||||
|
||||
|
|
|
@ -279,6 +279,11 @@ Future<Void> holdWhileVoid(X object, Future<T> what)
|
|||
return Void();
|
||||
}
|
||||
|
||||
template<class T>
|
||||
Future<Void> store(Future<T> what, T &out) {
|
||||
return map(what, [&out](T const &v) { out = v; return Void(); });
|
||||
}
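
A hypothetical use of the new store() helper inside an actor, assuming the usual fdbclient Database/Transaction types; the actor, variable, and trace event names are illustrative only:

	ACTOR Future<Void> storeExample(Database cx) {
		state Transaction tr(cx);
		state Version v;
		// store() turns the Future<Version> into a Future<Void> that assigns v when ready,
		// which composes nicely with success()/waitForAll() style code.
		Void _ = wait( store(tr.getReadVersion(), v) );
		TraceEvent("StoreExampleReadVersion").detail("version", v);
		return Void();
	}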
|
||||
|
||||
//Waits for a future to be ready, and then applies an asynchronous function to it.
|
||||
ACTOR template<class T, class F, class U = decltype( fake<F>()(fake<T>()).getValue() )>
|
||||
Future<U> mapAsync(Future<T> what, F actorFunc)
|
||||
|
@ -1189,7 +1194,7 @@ private:
|
|||
}
|
||||
|
||||
ACTOR static Future<Void> takeMoreActor(FlowLock* lock, int* amount) {
|
||||
Void _ = wait(lock->take(1));
|
||||
Void _ = wait(lock->take());
|
||||
int extra = std::min( lock->available(), *amount-1 );
|
||||
lock->active += extra;
|
||||
*amount = 1 + extra;
|
||||
|
|
|
@ -66,3 +66,19 @@ std::string toIPVectorString(std::vector<uint32_t> ips) {
|
|||
return output;
|
||||
}
|
||||
|
||||
Future<Reference<IConnection>> INetworkConnections::connect( std::string host, std::string service, bool useTLS ) {
|
||||
// Use map to create an actor that returns an endpoint or throws
|
||||
Future<NetworkAddress> pickEndpoint = map(resolveTCPEndpoint(host, service), [=](std::vector<NetworkAddress> const &addresses) -> NetworkAddress {
|
||||
NetworkAddress addr = addresses[g_random->randomInt(0, addresses.size())];
|
||||
if(useTLS)
|
||||
addr.flags = NetworkAddress::FLAG_TLS;
|
||||
return addr;
|
||||
});
|
||||
|
||||
// Wait for the endpoint to return, then wait for connect(endpoint) and return it.
|
||||
// Template types are being provided explicitly because they can't be automatically deduced for some reason.
|
||||
return mapAsync<NetworkAddress, std::function<Future<Reference<IConnection>>(NetworkAddress const &)>, Reference<IConnection> >
|
||||
(pickEndpoint, [=](NetworkAddress const &addr) -> Future<Reference<IConnection>> {
|
||||
return connect(addr);
|
||||
});
|
||||
}
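
A hypothetical caller of the name-based resolve/connect interface added here; the host and port are placeholders, and the INetworkConnections pointer would normally come from the network singleton rather than being passed in:

	ACTOR Future<Void> connectByName(INetworkConnections *net) {
		// Resolve the name first (optional; connect() below resolves internally too).
		state std::vector<NetworkAddress> addrs = wait( net->resolveTCPEndpoint("example.com", "4500") );
		TraceEvent("ResolvedEndpoint").detail("addresses", (int)addrs.size());
		// One-step resolve plus connect to a randomly chosen address.
		Reference<IConnection> conn = wait( net->connect("example.com", "4500") );
		TraceEvent("ConnectedByName").detail("peer", conn->getPeerAddress().toString());
		return Void();
	}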
|
||||
|
|
|
@ -270,6 +270,13 @@ public:
|
|||
// Make an outgoing connection to the given address. May return an error or block indefinitely in case of connection problems!
|
||||
virtual Future<Reference<IConnection>> connect( NetworkAddress toAddr ) = 0;
|
||||
|
||||
// Resolve host name and service name (such as "http" or can be a plain number like "80") to a list of 1 or more NetworkAddresses
|
||||
virtual Future<std::vector<NetworkAddress>> resolveTCPEndpoint( std::string host, std::string service ) = 0;
|
||||
|
||||
// Convenience function to resolve host/service and connect to one of its NetworkAddresses randomly
|
||||
// useTLS has to be a parameter here because it is passed to connect() as part of the toAddr object.
|
||||
virtual Future<Reference<IConnection>> connect( std::string host, std::string service, bool useTLS = false);
|
||||
|
||||
// Listen for connections on the given local address
|
||||
virtual Reference<IListener> listen( NetworkAddress localAddr ) = 0;
|
||||
|
||||
|
|
|
@ -3,5 +3,5 @@ testName=UnitTests
|
|||
startDelay=0
|
||||
useDB=false
|
||||
maxTestCases=0
|
||||
testsMatching=backup/containers/
|
||||
testsMatching=backup/containers/url
|