Merge branch 'release-6.0'

# Conflicts:
#	bindings/go/README.md
#	documentation/sphinx/source/release-notes.rst
#	fdbserver/MasterProxyServer.actor.cpp
#	versions.target
Author: Evan Tschannen
Date:   2018-10-15 18:38:51 -07:00
Commit: 0217aed74c

18 changed files with 113 additions and 80 deletions


@@ -87,6 +87,13 @@ func fdb_future_block_until_ready(f *C.FDBFuture) {
 		return
 	}
 
+	// The mutex here is used as a signal that the callback is complete.
+	// We first lock it, then pass it to the callback, and then lock it
+	// again. The second call to lock won't return until the callback has
+	// fired.
+	//
+	// See https://groups.google.com/forum/#!topic/golang-nuts/SPjQEcsdORA
+	// for the history of why this pattern came to be used.
	m := &sync.Mutex{}
	m.Lock()
	C.go_set_callback(unsafe.Pointer(f), unsafe.Pointer(m))
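The comment added above describes a lock-twice handshake: Go's sync.Mutex may be unlocked by a different goroutine than the one that locked it, so the second Lock blocks until the callback fires. Standard C++ mutexes forbid cross-thread unlock, so a minimal C++ rendering of the same completion signal uses a binary semaphore instead; `asyncOp` below is a hypothetical stand-in for a C callback API like the one the binding wraps.

```cpp
#include <semaphore>
#include <thread>

// Hypothetical stand-in for a C callback API: invokes cb(arg) from some
// other thread once the operation completes.
void asyncOp(void (*cb)(void*), void* arg) {
    std::thread([=] { cb(arg); }).detach();
}

void blockUntilReady() {
    std::binary_semaphore done(0);   // plays the role of the pre-locked mutex
    asyncOp([](void* p) {            // the callback signals completion...
        static_cast<std::binary_semaphore*>(p)->release();
    }, &done);
    done.acquire();                  // ...and this returns only after it fires
}
```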


@@ -1524,15 +1524,20 @@ def init(event_model=None):
             pass
 
         class ThreadEvent(object):
+            has_async_ = hasattr(gevent.get_hub().loop, 'async_')
             def __init__(self):
-                self.async = gevent.get_hub().loop.async()
-                self.async.start(nullf)
+                if ThreadEvent.has_async_:
+                    self.gevent_async = gevent.get_hub().loop.async_()
+                else:
+                    self.gevent_async = getattr(gevent.get_hub().loop, 'async')()
+
+                self.gevent_async.start(nullf)
 
             def set(self):
-                self.async.send()
+                self.gevent_async.send()
 
             def wait(self):
-                gevent.get_hub().wait(self.async)
+                gevent.get_hub().wait(self.gevent_async)
     else:
         # gevent 0.x doesn't have async, so use a pipe. This doesn't work on Windows.
         if platform.system() == 'Windows':


@@ -10,38 +10,38 @@ macOS
 
 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
 
-* `FoundationDB-6.0.11.pkg <https://www.foundationdb.org/downloads/6.0.11/macOS/installers/FoundationDB-6.0.11.pkg>`_
+* `FoundationDB-6.0.13.pkg <https://www.foundationdb.org/downloads/6.0.13/macOS/installers/FoundationDB-6.0.13.pkg>`_
 
 Ubuntu
 ------
 
 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
 
-* `foundationdb-clients-6.0.11-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.11/ubuntu/installers/foundationdb-clients_6.0.11-1_amd64.deb>`_
-* `foundationdb-server-6.0.11-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.11/ubuntu/installers/foundationdb-server_6.0.11-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.13-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.13/ubuntu/installers/foundationdb-clients_6.0.13-1_amd64.deb>`_
+* `foundationdb-server-6.0.13-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.13/ubuntu/installers/foundationdb-server_6.0.13-1_amd64.deb>`_ (depends on the clients package)
 
 RHEL/CentOS EL6
 ---------------
 
 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
 
-* `foundationdb-clients-6.0.11-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel6/installers/foundationdb-clients-6.0.11-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.0.11-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel6/installers/foundationdb-server-6.0.11-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.13-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.13/rhel6/installers/foundationdb-clients-6.0.13-1.el6.x86_64.rpm>`_
+* `foundationdb-server-6.0.13-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.13/rhel6/installers/foundationdb-server-6.0.13-1.el6.x86_64.rpm>`_ (depends on the clients package)
 
 RHEL/CentOS EL7
 ---------------
 
 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
 
-* `foundationdb-clients-6.0.11-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel7/installers/foundationdb-clients-6.0.11-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.0.11-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel7/installers/foundationdb-server-6.0.11-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.13-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.13/rhel7/installers/foundationdb-clients-6.0.13-1.el7.x86_64.rpm>`_
+* `foundationdb-server-6.0.13-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.13/rhel7/installers/foundationdb-server-6.0.13-1.el7.x86_64.rpm>`_ (depends on the clients package)
 
 Windows
 -------
 
 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
 
-* `foundationdb-6.0.11-x64.msi <https://www.foundationdb.org/downloads/6.0.11/windows/installers/foundationdb-6.0.11-x64.msi>`_
+* `foundationdb-6.0.13-x64.msi <https://www.foundationdb.org/downloads/6.0.13/windows/installers/foundationdb-6.0.13-x64.msi>`_
 
 API Language Bindings
 =====================
@@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
 
 If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
 
-* `foundationdb-6.0.11.tar.gz <https://www.foundationdb.org/downloads/6.0.11/bindings/python/foundationdb-6.0.11.tar.gz>`_
+* `foundationdb-6.0.13.tar.gz <https://www.foundationdb.org/downloads/6.0.13/bindings/python/foundationdb-6.0.13.tar.gz>`_
 
 Ruby 1.9.3/2.0.0+
 -----------------
 
-* `fdb-6.0.11.gem <https://www.foundationdb.org/downloads/6.0.11/bindings/ruby/fdb-6.0.11.gem>`_
+* `fdb-6.0.13.gem <https://www.foundationdb.org/downloads/6.0.13/bindings/ruby/fdb-6.0.13.gem>`_
 
 Java 8+
 -------
 
-* `fdb-java-6.0.11.jar <https://www.foundationdb.org/downloads/6.0.11/bindings/java/fdb-java-6.0.11.jar>`_
-* `fdb-java-6.0.11-javadoc.jar <https://www.foundationdb.org/downloads/6.0.11/bindings/java/fdb-java-6.0.11-javadoc.jar>`_
+* `fdb-java-6.0.13.jar <https://www.foundationdb.org/downloads/6.0.13/bindings/java/fdb-java-6.0.13.jar>`_
+* `fdb-java-6.0.13-javadoc.jar <https://www.foundationdb.org/downloads/6.0.13/bindings/java/fdb-java-6.0.13-javadoc.jar>`_
 
 Go 1.1+
 -------


@@ -80,7 +80,7 @@ Default Values
 Certificate file default location
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The default behavior when the certificate or key file is not specified is to look for a file named ``fdb.pem`` in the current working directory. If this file is not present, an attempt is made to load a file from a system-dependent location:
+The default behavior when the certificate or key file is not specified is to look for a file named ``fdb.pem`` in the current working directory. If this file is not present, an attempt is made to load a file from a system-dependent location as follows:
 
 * Linux: ``/etc/foundationdb/fdb.pem``
 * macOS: ``/usr/local/etc/foundationdb/fdb.pem``
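The lookup order documented above is mechanical enough to sketch. The helper below is illustrative only — it mirrors the documented search order, not FDB's actual implementation:

```cpp
#include <fstream>
#include <string>

// Illustrative resolver for the documented default-certificate search order:
// current working directory first, then the platform-specific path.
std::string resolveDefaultCertFile() {
    const char* candidates[] = {
        "fdb.pem",                                // current working directory
#if defined(__linux__)
        "/etc/foundationdb/fdb.pem",
#elif defined(__APPLE__)
        "/usr/local/etc/foundationdb/fdb.pem",
#endif
    };
    for (const char* path : candidates)
        if (std::ifstream(path).good())           // first readable file wins
            return path;
    return "";                                    // nothing found
}
```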
@@ -96,11 +96,6 @@ Default Password
 
 There is no default password. If no password is specified, it is assumed that the private key is unencrypted.
 
-CA file default location
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If a value is not specified, the software searches for certs in the default openssl certs location.
-
 Parameters and client bindings
 ------------------------------
@@ -109,7 +104,7 @@ The default LibreSSL-based implementation
 -----------------------------------------
 
 FoundationDB offers TLS based on the LibreSSL library. By default, it will be enabled automatically when participating in a TLS-enabled cluster.
 
-For TLS to operate, each process (both server and client) must have an X509 certificate, its corresponding private key, and potentially the certificates with which is was signed. When a process begins to communicate with a FoundationDB server process, the peer's certificate is checked to see if it is trusted and the fields of the peer certificate are verified. Peers must share the same root trusted certificate, and they must both present certificates whose signing chain includes this root certificate.
+For TLS to operate, each process (both server and client) must have an X509 certificate, its corresponding private key, and the certificates with which it was signed. When a process begins to communicate with a FoundationDB server process, the peer's certificate is checked to see if it is trusted and the fields of the peer certificate are verified. Peers must share the same root trusted certificate, and they must both present certificates whose signing chain includes this root certificate.
 
 If the local certificate and chain is invalid, a FoundationDB server process bound to a TLS address will not start. In the case of invalid certificates on a client, the client will be able to start but will be unable to connect any TLS-enabled cluster.
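The shared-root requirement in the paragraph above is what a standard X509 chain verification checks. A minimal OpenSSL sketch of that check — generic OpenSSL usage, not FDB's internal TLS plugin code:

```cpp
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>

// Returns true if `peer` chains up to the shared trusted `root`, optionally
// through `intermediates`. The field-level checks FDB also performs on the
// peer certificate are out of scope for this sketch.
bool chainsToSharedRoot(X509* peer, STACK_OF(X509)* intermediates, X509* root) {
    X509_STORE* store = X509_STORE_new();        // the trusted-root set
    X509_STORE_add_cert(store, root);

    X509_STORE_CTX* ctx = X509_STORE_CTX_new();
    X509_STORE_CTX_init(ctx, store, peer, intermediates);
    bool ok = X509_verify_cert(ctx) == 1;        // walks the signing chain
    X509_STORE_CTX_free(ctx);
    X509_STORE_free(store);
    return ok;
}
```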


@@ -1081,8 +1081,8 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
 			backupTagUids.push_back(config.getUid());
 
 			tagStates.push_back(config.stateEnum().getOrThrow(tr));
-			tagRangeBytes.push_back(config.rangeBytesWritten().getD(tr, 0));
-			tagLogBytes.push_back(config.logBytesWritten().getD(tr, 0));
+			tagRangeBytes.push_back(config.rangeBytesWritten().getD(tr, false, 0));
+			tagLogBytes.push_back(config.logBytesWritten().getD(tr, false, 0));
 			tagContainers.push_back(config.backupContainer().getOrThrow(tr));
 			tagLastRestorableVersions.push_back(fba.getLastRestorable(tr, StringRef(tag->tagName)));
 		}


@@ -808,7 +808,7 @@ namespace fileBackup {
 
 			state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
 			state BackupConfig config(current.first);
-			EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+			EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 			if (!backupAgent->isRunnable((BackupAgentBase::enumState)status)) {
 				throw backup_unneeded();
@@ -3377,7 +3377,7 @@
 			}
 
 			state BackupConfig config(oldUidAndAborted.get().first);
-			state EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+			state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 			// Break, if no longer runnable
 			if (!FileBackupAgent::isRunnable(status)) {
@@ -3412,7 +3412,7 @@
 		Optional<UidAndAbortedFlagT> uidAndAbortedFlag = wait(tag.get(tr));
 		if (uidAndAbortedFlag.present()) {
 			state BackupConfig prevConfig(uidAndAbortedFlag.get().first);
-			state EBackupState prevBackupStatus = wait(prevConfig.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+			state EBackupState prevBackupStatus = wait(prevConfig.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 			if (FileBackupAgent::isRunnable(prevBackupStatus)) {
 				throw backup_duplicate();
 			}
@@ -3619,7 +3619,7 @@
 		state KeyBackedTag tag = makeBackupTag(tagName.toString());
 		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
 		state BackupConfig config(current.first);
-		state EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+		state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 		if (!FileBackupAgent::isRunnable(status)) {
 			throw backup_unneeded();
@@ -3670,7 +3670,7 @@
 		state BackupConfig config(current.first);
 		state Key destUidValue = wait(config.destUidValue().getOrThrow(tr));
-		EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+		EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 		if (!backupAgent->isRunnable((BackupAgentBase::enumState)status)) {
 			throw backup_unneeded();
@@ -3709,7 +3709,7 @@
 		state Future<Optional<Value>> fPaused = tr->get(backupAgent->taskBucket->getPauseKey());
 		if (uidAndAbortedFlag.present()) {
 			config = BackupConfig(uidAndAbortedFlag.get().first);
-			EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+			EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 			backupState = status;
 		}
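Every one of these call sites gains a `false` argument in the same position, which suggests (an inference from the call sites alone) that a snapshot-read flag was inserted into `getD` ahead of the default value. A hedged sketch of that shape, with the flow types (Future, transaction reference) simplified away:

```cpp
#include <optional>

// Illustrative only: the real accessor lives in FDB's key-backed property
// types, takes a transaction first, and returns a flow Future. Inserting
// `snapshot` before the default is what forces each two-argument call site
// above to become three-argument.
template <class T>
struct PropertySketch {
    std::optional<T> stored;

    T getD(bool snapshot, T defaultValue) const {
        (void)snapshot;  // in the real system this would select snapshot-isolation reads
        return stored.value_or(defaultValue);
    }
};
```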


@@ -39,8 +39,8 @@ ClientKnobs::ClientKnobs(bool randomize) {
 	init( FAILURE_MIN_DELAY, 4.0 ); if( randomize && BUGGIFY ) FAILURE_MIN_DELAY = 1.0;
 	init( FAILURE_TIMEOUT_DELAY, FAILURE_MIN_DELAY );
 	init( CLIENT_FAILURE_TIMEOUT_DELAY, FAILURE_MIN_DELAY );
-	init( FAILURE_EMERGENCY_DELAY, 60.0 );
-	init( FAILURE_MAX_GENERATIONS, 4 );
+	init( FAILURE_EMERGENCY_DELAY, 30.0 );
+	init( FAILURE_MAX_GENERATIONS, 10 );
 
 	// wrong_shard_server sometimes comes from the only nonfailed server, so we need to avoid a fast spin


@@ -734,6 +734,11 @@ public:
 		else if(newTimeoutVersion <= version) // Ensure that the time extension is to the future
 			newTimeoutVersion = version + 1;
 
+		// This can happen if extendTimeout is called shortly after task start and the task's timeout was jittered to be longer
+		if(newTimeoutVersion <= task->timeoutVersion) {
+			newTimeoutVersion = task->timeoutVersion + 1;
+		}
+
 		// This is where the task definition is being moved to
 		state Subspace newTimeoutSpace = taskBucket->timeouts.get(newTimeoutVersion).get(task->key);
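The added clamp encodes a simple rule: an extended timeout must land strictly after both the current version and the task's existing (possibly jittered) timeout. A standalone restatement of that rule, with illustrative names:

```cpp
#include <algorithm>
#include <cstdint>

using Version = int64_t;

// An extension is only meaningful if it moves the timeout strictly forward,
// past both the current version and the previously scheduled timeout.
Version extendedTimeout(Version version, Version currentTimeout, Version proposed) {
    Version v = std::max(proposed, version + 1);   // must be in the future
    return std::max(v, currentTimeout + 1);        // must actually extend
}
```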


@@ -151,6 +151,7 @@ Future<Void> AsyncFileCached::truncate( int64_t size ) {
 	++countCacheWrites;
 
 	std::vector<Future<Void>> actors;
+	int64_t oldLength = length;
 
 	int offsetInPage = size % pageCache->pageSize;
 	int64_t pageOffset = size - offsetInPage;
@@ -176,25 +177,45 @@
 		pageOffset += pageCache->pageSize;
 	}
 
-	/*
-	for ( auto p = pages.lower_bound( pageOffset ); p != pages.end(); p = pages.erase(p) ) {
-		auto f = p->second->truncate();
-		if ( !f.isReady() || f.isError())
-			actors.push_back( f );
-	}
-	*/
-
-	for ( auto p = pages.begin(); p != pages.end(); ) {
-		if ( p->first >= pageOffset ) {
-			auto f = p->second->truncate();
-			if ( !f.isReady() || f.isError() )
-				actors.push_back( f );
-			auto last = p;
-			++p;
-			pages.erase(last);
-		} else
-			++p;
-	}
+	// if this call to truncate results in a larger file, there is no
+	// need to erase any pages
+	if(oldLength > pageOffset) {
+		// Iterating through all pages results in better cache locality than
+		// looking up pages one by one in the hash table. However, if we only need
+		// to truncate a small portion of data, looking up pages one by one should
+		// be faster. So for now we do single key lookup for each page if it results
+		// in less than a fixed percentage of the unordered map being accessed.
+		int64_t numLookups = (oldLength + (pageCache->pageSize-1) - pageOffset) / pageCache->pageSize;
+		if(numLookups < pages.size() * FLOW_KNOBS->PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION) {
+			for(int64_t offset = pageOffset; offset < oldLength; offset += pageCache->pageSize) {
+				auto iter = pages.find(offset);
+				if(iter != pages.end()) {
+					auto f = iter->second->truncate();
+					if(!f.isReady() || f.isError()) {
+						actors.push_back(f);
+					}
+					pages.erase(iter);
+				}
+			}
+		}
+		else {
+			for(auto p = pages.begin(); p != pages.end();) {
+				if(p->first >= pageOffset) {
+					auto f = p->second->truncate();
+					if(!f.isReady() || f.isError()) {
+						actors.push_back(f);
+					}
+					auto last = p;
+					++p;
+					pages.erase(last);
+				}
+				else {
+					++p;
+				}
+			}
+		}
+	}
 
 	return truncate_impl( this, size, waitForAll( actors ) );
 }
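The heuristic in this hunk is self-contained enough to restate outside flow. The sketch below uses simplified names and a plain unordered_map; the fraction parameter corresponds to the PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION knob added later in this commit:

```cpp
#include <cstdint>
#include <unordered_map>

// Standalone sketch of the heuristic above: drop all cached pages at or past
// `pageOffset`, choosing between per-key lookups and a full scan based on
// what fraction of the map the lookups would touch.
void dropPagesFrom(std::unordered_map<int64_t, int>& pages, int64_t pageOffset,
                   int64_t oldLength, int64_t pageSize, double lookupFraction) {
    if (oldLength <= pageOffset) return;  // file grew: nothing to erase

    int64_t numLookups = (oldLength + (pageSize - 1) - pageOffset) / pageSize;
    if (numLookups < pages.size() * lookupFraction) {
        // Few pages affected: individual hash lookups touch less of the map.
        for (int64_t off = pageOffset; off < oldLength; off += pageSize)
            pages.erase(off);
    } else {
        // Large truncation: one sequential pass has better cache locality.
        for (auto it = pages.begin(); it != pages.end();) {
            if (it->first >= pageOffset) it = pages.erase(it);
            else ++it;
        }
    }
}
```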


@@ -164,27 +164,26 @@
 			fullSnapshot(data);
 			resetSnapshot = true;
 			committedWriteBytes = notifiedCommittedWriteBytes.get();
+			overheadWriteBytes = 0;
+
+			if(disableSnapshot) {
+				return Void();
+			}
+			log_op(OpCommit, StringRef(), StringRef());
 		}
 		else {
 			int64_t bytesWritten = commit_queue(queue, !disableSnapshot, sequential);
-			if(!disableSnapshot) {
-				committedWriteBytes += bytesWritten + OP_DISK_OVERHEAD; //OP_DISK_OVERHEAD is for the following log_op(OpCommit)
-			}
-
-			//If there have been no mutations since the last commit, do nothing
-			if( notifiedCommittedWriteBytes.get() == committedWriteBytes )
-				return Void();
-
-			notifiedCommittedWriteBytes.set(committedWriteBytes);
-		}
-
-		if(disableSnapshot) {
-			return Void();
-		}
-
-		log_op(OpCommit, StringRef(), StringRef());
-		if(!transactionIsLarge) {
-			committedWriteBytes += log->getCommitOverhead();
-		}
+
+			if(disableSnapshot) {
+				return Void();
+			}
+
+			if(bytesWritten > 0 || committedWriteBytes > notifiedCommittedWriteBytes.get()) {
+				committedWriteBytes += bytesWritten + overheadWriteBytes + OP_DISK_OVERHEAD; //OP_DISK_OVERHEAD is for the following log_op(OpCommit)
+				notifiedCommittedWriteBytes.set(committedWriteBytes); //This set will cause snapshot items to be written, so it must happen before the OpCommit
+				log_op(OpCommit, StringRef(), StringRef());
+				if(!transactionIsLarge) {
+					overheadWriteBytes = log->getCommitOverhead();
+				}
+			}
+		}
 
 		auto c = log->commit();
@@ -347,6 +346,7 @@
 	IDiskQueue *log;
 	Future<Void> recovering, snapshotting;
 	int64_t committedWriteBytes;
+	int64_t overheadWriteBytes;
 	NotifiedVersion notifiedCommittedWriteBytes;
 	Key recoveredSnapshotKey; // After recovery, the next key in the currently uncompleted snapshot
 	IDiskQueue::location currentSnapshotEnd; //The end of the most recently completed snapshot (this snapshot cannot be discarded)
@@ -710,7 +710,7 @@
 };
 
 KeyValueStoreMemory::KeyValueStoreMemory( IDiskQueue* log, UID id, int64_t memoryLimit, bool disableSnapshot, bool replaceContent, bool exactRecovery )
-	: log(log), id(id), previousSnapshotEnd(-1), currentSnapshotEnd(-1), resetSnapshot(false), memoryLimit(memoryLimit), committedWriteBytes(0),
+	: log(log), id(id), previousSnapshotEnd(-1), currentSnapshotEnd(-1), resetSnapshot(false), memoryLimit(memoryLimit), committedWriteBytes(0), overheadWriteBytes(0),
 	  committedDataSize(0), transactionSize(0), transactionIsLarge(false), disableSnapshot(disableSnapshot), replaceContent(replaceContent), snapshotCount(0), firstCommitWithSnapshot(true)
 {
 	recovering = recover( this, exactRecovery );


@@ -840,7 +840,7 @@ ACTOR Future<Void> commitBatch(
 			wait(yield());
 
 			if(!self->txsPopVersions.size() || msg.popTo > self->txsPopVersions.back().second) {
-				if(self->txsPopVersions.size() > SERVER_KNOBS->MAX_TXS_POP_VERSION_HISTORY) {
+				if(self->txsPopVersions.size() >= SERVER_KNOBS->MAX_TXS_POP_VERSION_HISTORY) {
 					TraceEvent(SevWarnAlways, "DiscardingTxsPopHistory").suppressFor(1.0);
 					self->txsPopVersions.pop_front();
 				}
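The `>` to `>=` change caps the history at exactly MAX_TXS_POP_VERSION_HISTORY entries instead of letting it grow one past the knob. The bounded-push pattern in isolation, with illustrative types:

```cpp
#include <cstdint>
#include <deque>
#include <utility>

using Version = int64_t;

// Keep at most maxHistory entries, discarding the oldest before inserting:
// with '>=' the deque never exceeds the cap; with '>' it could sit one over.
void pushBounded(std::deque<std::pair<Version, Version>>& history,
                 std::pair<Version, Version> entry, size_t maxHistory) {
    if (history.size() >= maxHistory)
        history.pop_front();
    history.push_back(entry);
}
```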
@@ -1228,7 +1228,7 @@ ACTOR Future<Void> monitorRemoteCommitted(ProxyCommitData* self, Reference<Async
 		for(auto &it : remoteLogs.get()) {
 			replies.push_back(brokenPromiseToNever( it.interf().getQueuingMetrics.getReply( TLogQueuingMetricsRequest() ) ));
 		}
-		wait( waitForAll(replies) );
+		wait( waitForAll(replies) || onChange );
 
 		if(onChange.isReady()) {
 			break;

@@ -414,7 +414,7 @@ struct RolesInfo {
 				obj["input_bytes"] = StatusCounter(metrics.getValue("BytesInput")).getStatus();
 				obj["durable_bytes"] = StatusCounter(metrics.getValue("BytesDurable")).getStatus();
 				obj.setKeyRawNumber("query_queue_max", metrics.getValue("QueryQueueMax"));
-				obj["total_queries"] = StatusCounter(metrics.getValue("AllQueries")).getStatus();
+				obj["total_queries"] = StatusCounter(metrics.getValue("QueryQueue")).getStatus();
 				obj["finished_queries"] = StatusCounter(metrics.getValue("FinishedQueries")).getStatus();
 				obj["bytes_queried"] = StatusCounter(metrics.getValue("BytesQueried")).getStatus();
 				obj["keys_queried"] = StatusCounter(metrics.getValue("RowsQueried")).getStatus();
@@ -1398,7 +1398,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
 			StatusCounter readBytes;
 
 			for(auto &ss : storageServers.get()) {
-				readRequests.updateValues( StatusCounter(ss.second.getValue("AllQueries")));
+				readRequests.updateValues( StatusCounter(ss.second.getValue("QueryQueue")));
 				reads.updateValues( StatusCounter(ss.second.getValue("FinishedQueries")));
 				readKeys.updateValues( StatusCounter(ss.second.getValue("RowsQueried")));
 				readBytes.updateValues( StatusCounter(ss.second.getValue("BytesQueried")));


@@ -1381,6 +1381,7 @@ void getQueuingMetrics( TLogData* self, Reference<LogData> logData, TLogQueuingM
 	reply.bytesInput = self->bytesInput;
 	reply.bytesDurable = self->bytesDurable;
 	reply.storageBytes = self->persistentData->getStorageBytes();
+	//FIXME: Add the knownCommittedVersion to this message and change ratekeeper to use that version.
 	reply.v = logData->durableKnownCommittedVersion;
 	req.reply.send( reply );
 }


@@ -2342,11 +2342,8 @@
 				.detail("AtVersion", currentVersion)
 				.detail("StorageVersion", data->storageVersion());
 			ASSERT( rollbackVersion >= data->storageVersion() );
-		}
-
-		// Don't let oldestVersion (and thus storageVersion) go into the rolled back range of versions
-		// Since we currently don't read from uncommitted log systems, seeing the lastEpochEnd implies that currentVersion is fully committed, so we can safely make it durable
-		if ( rollbackVersion < fromVersion && rollbackVersion > restoredVersion )
 			rollback( data, rollbackVersion, currentVersion );
+		}
+		data->recoveryVersionSkips.push_back(std::make_pair(rollbackVersion, currentVersion - rollbackVersion));
 
 	} else if ((m.type == MutationRef::SetValue || m.type == MutationRef::ClearRange) && m.param1.substr(1).startsWith(serverTagPrefix)) {
@@ -2573,17 +2570,17 @@ ACTOR Future<Void> update( StorageServer* data, bool* pReceivedUpdate )
 		data->version.set( ver ); // Triggers replies to waiting gets for new version(s)
 		if (data->otherError.getFuture().isReady()) data->otherError.getFuture().get();
 
-		Version maxVersionInMemory = SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS;
+		Version maxVersionsInMemory = SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS;
 		for(int i = 0; i < data->recoveryVersionSkips.size(); i++) {
-			maxVersionInMemory += data->recoveryVersionSkips[i].second;
+			maxVersionsInMemory += data->recoveryVersionSkips[i].second;
 		}
 
 		//TraceEvent("StorageServerUpdated", data->thisServerID).detail("Ver", ver).detail("DataVersion", data->version.get())
 		//	.detail("LastTLogVersion", data->lastTLogVersion).detail("NewOldest", updater.newOldestVersion).detail("DesiredOldest",data->desiredOldestVersion.get())
-		//	.detail("MaxVersionInMemory", maxVersionInMemory);
+		//	.detail("MaxVersionInMemory", maxVersionsInMemory);
 
 		// Trigger updateStorage if necessary
-		Version proposedOldestVersion = std::max(data->version.get(), data->lastTLogVersion) - maxVersionInMemory;
+		Version proposedOldestVersion = std::max(data->version.get(), data->lastTLogVersion) - maxVersionsInMemory;
 		proposedOldestVersion = std::min(proposedOldestVersion, data->version.get()-1);
 		proposedOldestVersion = std::max(proposedOldestVersion, data->oldestVersion.get());
 		proposedOldestVersion = std::max(proposedOldestVersion, data->desiredOldestVersion.get());


@@ -71,6 +71,7 @@ FlowKnobs::FlowKnobs(bool randomize, bool isSimulated) {
 	init( BUGGIFY_SIM_PAGE_CACHE_4K, 1e6 );
 	init( BUGGIFY_SIM_PAGE_CACHE_64K, 1e6 );
 	init( MAX_EVICT_ATTEMPTS, 100 ); if( randomize && BUGGIFY ) MAX_EVICT_ATTEMPTS = 2;
+	init( PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION, 0.1 ); if( randomize && BUGGIFY ) PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION = 0.0; else if( randomize && BUGGIFY ) PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION = 1.0;
 
 	//AsyncFileKAIO
 	init( MAX_OUTSTANDING, 64 );


@@ -91,6 +91,7 @@ public:
 	int64_t BUGGIFY_SIM_PAGE_CACHE_4K;
 	int64_t BUGGIFY_SIM_PAGE_CACHE_64K;
 	int MAX_EVICT_ATTEMPTS;
+	double PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION;
 
 	//AsyncFileKAIO
 	int MAX_OUTSTANDING;


@@ -32,7 +32,7 @@
 <Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
     <Product Name='$(var.Title)'
-             Id='{C4C2FA24-FC64-42B0-AB27-9F396843CEAB}'
+             Id='{47C44A33-2992-42BC-B683-2B531E3B5B0B}'
              UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
              Version='$(var.Version)'
              Manufacturer='$(var.Manufacturer)'