Merge branch 'release-6.1'

# Conflicts:
#	documentation/sphinx/source/release-notes.rst
#	versions.target
commit 8c3516951a
Evan Tschannen, 2019-05-12 20:13:49 -07:00
9 changed files with 64 additions and 46 deletions


@@ -243,11 +243,11 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
     // Verify the certificate.
     if ((store_ctx = X509_STORE_CTX_new()) == NULL) {
         TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid);
-        reason = "FDBLibTLSOutOfMemory";
+        reason = "Out of memory";
         goto err;
     }
     if (!X509_STORE_CTX_init(store_ctx, NULL, sk_X509_value(certs, 0), certs)) {
-        reason = "FDBLibTLSStoreCtxInit";
+        reason = "Store ctx init";
         goto err;
     }
     X509_STORE_CTX_trusted_stack(store_ctx, policy->roots);
@@ -256,31 +256,31 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
     X509_VERIFY_PARAM_set_flags(X509_STORE_CTX_get0_param(store_ctx), X509_V_FLAG_NO_CHECK_TIME);
     if (X509_verify_cert(store_ctx) <= 0) {
         const char *errstr = X509_verify_cert_error_string(X509_STORE_CTX_get_error(store_ctx));
-        reason = "FDBLibTLSVerifyCert VerifyError " + std::string(errstr);
+        reason = "Verify cert error: " + std::string(errstr);
         goto err;
     }

     // Check subject criteria.
     cert = sk_X509_value(store_ctx->chain, 0);
     if ((subject = X509_get_subject_name(cert)) == NULL) {
-        reason = "FDBLibTLSCertSubjectError";
+        reason = "Cert subject error";
         goto err;
     }
     for (auto &pair: verify->subject_criteria) {
         if (!match_criteria(cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
-            reason = "FDBLibTLSCertSubjectMatchFailure";
+            reason = "Cert subject match failure";
             goto err;
         }
     }

     // Check issuer criteria.
     if ((issuer = X509_get_issuer_name(cert)) == NULL) {
-        reason = "FDBLibTLSCertIssuerError";
+        reason = "Cert issuer error";
         goto err;
     }
     for (auto &pair: verify->issuer_criteria) {
         if (!match_criteria(cert, issuer, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
-            reason = "FDBLibTLSCertIssuerMatchFailure";
+            reason = "Cert issuer match failure";
             goto err;
         }
     }
@@ -288,12 +288,12 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
     // Check root criteria - this is the subject of the final certificate in the stack.
     cert = sk_X509_value(store_ctx->chain, sk_X509_num(store_ctx->chain) - 1);
     if ((subject = X509_get_subject_name(cert)) == NULL) {
-        reason = "FDBLibTLSRootSubjectError";
+        reason = "Root subject error";
        goto err;
     }
     for (auto &pair: verify->root_criteria) {
         if (!match_criteria(cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
-            reason = "FDBLibTLSRootSubjectMatchFailure";
+            reason = "Root subject match failure";
             goto err;
         }
     }
@@ -343,7 +343,7 @@ bool FDBLibTLSSession::verify_peer() {
     if (!rc) {
         // log the various failure reasons
         for (std::string reason : verify_failure_reasons) {
-            TraceEvent(reason.c_str(), uid).suppressFor(1.0);
+            TraceEvent("FDBLibTLSVerifyFailure", uid).detail("Reason", reason).suppressFor(1.0);
         }
     }
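
Aside (not part of the diff): the one-line change above stops using the dynamic failure reason as the trace event name and instead logs a single fixed event, "FDBLibTLSVerifyFailure", with the reason attached as a detail. A minimal, self-contained C++ sketch of that pattern follows; the printf-based traceEvent below is a hypothetical stand-in for a structured logger, not FoundationDB's TraceEvent API.

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-in for a structured trace logger.
static void traceEvent(const char* name, const char* key, const std::string& value) {
    // A fixed, literal event name keeps the set of event types bounded and searchable;
    // the variable part travels as a key/value detail instead.
    std::printf("Event=%s %s=\"%s\"\n", name, key, value.c_str());
}

int main() {
    std::vector<std::string> verifyFailureReasons = { "Cert subject match failure", "Root subject error" };
    for (const std::string& reason : verifyFailureReasons) {
        // Before this change: using reason.c_str() as the event name made every distinct reason a new event type.
        // After (as in the diff): one event type, with the reason carried as a detail.
        traceEvent("FDBLibTLSVerifyFailure", "Reason", reason);
    }
    return 0;
}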


@@ -10,38 +10,38 @@ macOS
 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.

-* `FoundationDB-6.1.5.pkg <https://www.foundationdb.org/downloads/6.1.5/macOS/installers/FoundationDB-6.1.5.pkg>`_
+* `FoundationDB-6.1.6.pkg <https://www.foundationdb.org/downloads/6.1.6/macOS/installers/FoundationDB-6.1.6.pkg>`_

 Ubuntu
 ------

 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.

-* `foundationdb-clients-6.1.5-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.5/ubuntu/installers/foundationdb-clients_6.1.5-1_amd64.deb>`_
-* `foundationdb-server-6.1.5-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.5/ubuntu/installers/foundationdb-server_6.1.5-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.6-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.6/ubuntu/installers/foundationdb-clients_6.1.6-1_amd64.deb>`_
+* `foundationdb-server-6.1.6-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.6/ubuntu/installers/foundationdb-server_6.1.6-1_amd64.deb>`_ (depends on the clients package)

 RHEL/CentOS EL6
 ---------------

 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.

-* `foundationdb-clients-6.1.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel6/installers/foundationdb-clients-6.1.5-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.1.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel6/installers/foundationdb-server-6.1.5-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel6/installers/foundationdb-clients-6.1.6-1.el6.x86_64.rpm>`_
+* `foundationdb-server-6.1.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel6/installers/foundationdb-server-6.1.6-1.el6.x86_64.rpm>`_ (depends on the clients package)

 RHEL/CentOS EL7
 ---------------

 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.

-* `foundationdb-clients-6.1.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel7/installers/foundationdb-clients-6.1.5-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.1.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel7/installers/foundationdb-server-6.1.5-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel7/installers/foundationdb-clients-6.1.6-1.el7.x86_64.rpm>`_
+* `foundationdb-server-6.1.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel7/installers/foundationdb-server-6.1.6-1.el7.x86_64.rpm>`_ (depends on the clients package)

 Windows
 -------

 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.

-* `foundationdb-6.1.5-x64.msi <https://www.foundationdb.org/downloads/6.1.5/windows/installers/foundationdb-6.1.5-x64.msi>`_
+* `foundationdb-6.1.6-x64.msi <https://www.foundationdb.org/downloads/6.1.6/windows/installers/foundationdb-6.1.6-x64.msi>`_

 API Language Bindings
 =====================
@@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
 If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:

-* `foundationdb-6.1.5.tar.gz <https://www.foundationdb.org/downloads/6.1.5/bindings/python/foundationdb-6.1.5.tar.gz>`_
+* `foundationdb-6.1.6.tar.gz <https://www.foundationdb.org/downloads/6.1.6/bindings/python/foundationdb-6.1.6.tar.gz>`_

 Ruby 1.9.3/2.0.0+
 -----------------

-* `fdb-6.1.5.gem <https://www.foundationdb.org/downloads/6.1.5/bindings/ruby/fdb-6.1.5.gem>`_
+* `fdb-6.1.6.gem <https://www.foundationdb.org/downloads/6.1.6/bindings/ruby/fdb-6.1.6.gem>`_

 Java 8+
 -------

-* `fdb-java-6.1.5.jar <https://www.foundationdb.org/downloads/6.1.5/bindings/java/fdb-java-6.1.5.jar>`_
-* `fdb-java-6.1.5-javadoc.jar <https://www.foundationdb.org/downloads/6.1.5/bindings/java/fdb-java-6.1.5-javadoc.jar>`_
+* `fdb-java-6.1.6.jar <https://www.foundationdb.org/downloads/6.1.6/bindings/java/fdb-java-6.1.6.jar>`_
+* `fdb-java-6.1.6-javadoc.jar <https://www.foundationdb.org/downloads/6.1.6/bindings/java/fdb-java-6.1.6-javadoc.jar>`_

 Go 1.1+
 -------


@@ -2,7 +2,7 @@
 Release Notes
 #############

-6.1.5
+6.1.6
 =====

 Features
@@ -126,6 +126,7 @@ Fixes only impacting 6.1.0+
 * The transaction log spill-by-reference policy could read too much data from disk. [6.1.5] `(PR #1527) <https://github.com/apple/foundationdb/pull/1527>`_
 * Memory tracking trace events could cause the program to crash when called from inside a trace event. [6.1.5] `(PR #1541) <https://github.com/apple/foundationdb/pull/1541>`_
 * TLogs will replace a large file with an empty file rather than doing a large truncate operation. [6.1.5] `(PR #1545) <https://github.com/apple/foundationdb/pull/1545>`_
+* Fix PR #1545 to work on Windows and Linux. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_

 Earlier release notes
 ---------------------


@@ -265,7 +265,7 @@ public:
         result = fallocate( fd, 0, 0, size);
         if (result != 0) {
             int fallocateErrCode = errno;
-            TraceEvent("AsyncFileKAIOAllocateError").detail("Fd",fd).detail("Filename", filename).GetLastError();
+            TraceEvent("AsyncFileKAIOAllocateError").detail("Fd",fd).detail("Filename", filename).detail("Size", size).GetLastError();
             if ( fallocateErrCode == EOPNOTSUPP ) {
                 // Mark fallocate as unsupported. Try again with truncate.
                 ctx.fallocateSupported = false;


@@ -603,11 +603,16 @@ private:
         if (randLog)
             fprintf( randLog, "SFT1 %s %s %s %" PRId64 "\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str(), size );

+        if (size == 0) {
+            // KAIO will return EINVAL, as len==0 is an error.
+            throw io_error();
+        }
+
         if(self->delayOnWrite)
             wait( waitUntilDiskReady( self->diskParameters, 0 ) );

         if( _chsize( self->h, (long) size ) == -1 ) {
-            TraceEvent(SevWarn, "SimpleFileIOError").detail("Location", 6);
+            TraceEvent(SevWarn, "SimpleFileIOError").detail("Location", 6).detail("Filename", self->filename).detail("Size", size).detail("Fd", self->h).GetLastError();
             throw io_error();
         }


@@ -164,7 +164,7 @@ public:
           readyToPush(Void()), fileSizeWarningLimit(fileSizeWarningLimit), lastCommit(Void()), isFirstCommit(true)
     {
         if (BUGGIFY)
-            fileExtensionBytes = 1<<10 * g_random->randomSkewedUInt32( 1, 40<<10 );
+            fileExtensionBytes = _PAGE_SIZE * g_random->randomSkewedUInt32( 1, 10<<10 );
         if (BUGGIFY)
             fileShrinkBytes = _PAGE_SIZE * g_random->randomSkewedUInt32( 1, 10<<10 );
         files[0].dbgFilename = filename(0);
@@ -283,21 +283,29 @@ public:
         TraceEvent("DiskQueueReplaceTruncateEnded").detail("Filename", file->getFilename());
     }

+#if defined(_WIN32)
+    ACTOR static Future<Reference<IAsyncFile>> replaceFile(Reference<IAsyncFile> toReplace) {
+        // Windows doesn't support a rename over an open file.
+        wait( toReplace->truncate(4<<10) );
+        return toReplace;
+    }
+#else
     ACTOR static Future<Reference<IAsyncFile>> replaceFile(Reference<IAsyncFile> toReplace) {
         incrementalTruncate( toReplace );

-        Reference<IAsyncFile> _replacement = wait( IAsyncFileSystem::filesystem()->open( toReplace->getFilename(), IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_UNBUFFERED | IAsyncFile::OPEN_LOCK, 0 ) );
+        Reference<IAsyncFile> _replacement = wait( IAsyncFileSystem::filesystem()->open( toReplace->getFilename(), IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_UNBUFFERED | IAsyncFile::OPEN_LOCK, 0600 ) );
         state Reference<IAsyncFile> replacement = _replacement;
         wait( replacement->sync() );

         return replacement;
     }
+#endif

-    Future<Void> push(StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
+    Future<Future<Void>> push(StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
         return push( this, pageData, toSync );
     }

-    ACTOR static Future<Void> push(RawDiskQueue_TwoFiles* self, StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
+    ACTOR static Future<Future<Void>> push(RawDiskQueue_TwoFiles* self, StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
         // Write the given data to the queue files, swapping or extending them if necessary.
         // Don't do any syncs, but push the modified file(s) onto toSync.
         ASSERT( self->readingFile == 2 );
@@ -325,21 +333,27 @@ public:
             std::swap(self->firstPages[0], self->firstPages[1]);
             self->files[1].popped = 0;
             self->writingPos = 0;
+            *self->firstPages[1] = *(const Page*)pageData.begin();

             const int64_t activeDataVolume = pageCeiling(self->files[0].size - self->files[0].popped + self->fileExtensionBytes + self->fileShrinkBytes);
-            const int64_t desiredMaxFileSize = std::max( activeDataVolume, SERVER_KNOBS->TLOG_HARD_LIMIT_BYTES * 2 );
-            if (self->files[1].size > desiredMaxFileSize) {
+            const int64_t desiredMaxFileSize = pageCeiling( std::max( activeDataVolume, SERVER_KNOBS->TLOG_HARD_LIMIT_BYTES * 2 ) );
+            const bool frivolouslyTruncate = BUGGIFY_WITH_PROB(0.1);
+            if (self->files[1].size > desiredMaxFileSize || frivolouslyTruncate) {
                 // Either shrink self->files[1] to the size of self->files[0], or chop off fileShrinkBytes
-                int64_t maxShrink = std::max( pageFloor(self->files[1].size - desiredMaxFileSize), self->fileShrinkBytes );
-                if (maxShrink / SERVER_KNOBS->DISK_QUEUE_FILE_EXTENSION_BYTES >
-                    SERVER_KNOBS->DISK_QUEUE_MAX_TRUNCATE_EXTENTS) {
+                int64_t maxShrink = pageFloor( std::max( self->files[1].size - desiredMaxFileSize, self->fileShrinkBytes ) );
+                if ((maxShrink > SERVER_KNOBS->DISK_QUEUE_MAX_TRUNCATE_BYTES) ||
+                    (frivolouslyTruncate && g_random->random01() < 0.3)) {
                     TEST(true); // Replacing DiskQueue file
                     TraceEvent("DiskQueueReplaceFile", self->dbgid).detail("Filename", self->files[1].f->getFilename()).detail("OldFileSize", self->files[1].size).detail("ElidedTruncateSize", maxShrink);
                     Reference<IAsyncFile> newFile = wait( replaceFile(self->files[1].f) );
                     self->files[1].setFile(newFile);
-                    self->files[1].size = 0;
+                    waitfor.push_back( self->files[1].f->truncate( self->fileExtensionBytes ) );
+                    self->files[1].size = self->fileExtensionBytes;
                 } else {
-                    self->files[1].size -= maxShrink;
+                    const int64_t startingSize = self->files[1].size;
+                    self->files[1].size -= std::min(maxShrink, self->files[1].size);
+                    self->files[1].size = std::max(self->files[1].size, self->fileExtensionBytes);
+                    TraceEvent("DiskQueueTruncate", self->dbgid).detail("Filename", self->files[1].f->getFilename()).detail("OldFileSize", startingSize).detail("NewFileSize", self->files[1].size);
                     waitfor.push_back( self->files[1].f->truncate( self->files[1].size ) );
                 }
             }
@@ -355,9 +369,8 @@ public:
                     TraceEvent(SevWarnAlways, "DiskQueueFileTooLarge", self->dbgid).suppressFor(1.0).detail("Filename", self->filename(1)).detail("Size", self->files[1].size);
                 }
             }
-        }
-
-        if (self->writingPos == 0) {
+        } else if (self->writingPos == 0) {
+            // If this is the first write to a brand new disk queue file.
             *self->firstPages[1] = *(const Page*)pageData.begin();
         }
@@ -368,8 +381,7 @@ public:
         waitfor.push_back( self->files[1].f->write( pageData.begin(), pageData.size(), self->writingPos ) );
         self->writingPos += pageData.size();

-        wait( waitForAll(waitfor) );
-        return Void();
+        return waitForAll(waitfor);
     }

     ACTOR static UNCANCELLABLE Future<Void> pushAndCommit(RawDiskQueue_TwoFiles* self, StringRef pageData, StringBuffer* pageMem, uint64_t poppedPages) {
@@ -396,11 +408,11 @@ public:
         TEST( pageData.size() > sizeof(Page) ); // push more than one page of data

-        Future<Void> pushed = self->push( pageData, &syncFiles );
+        Future<Void> pushed = wait( self->push( pageData, &syncFiles ) );
         pushing.send(Void());
-        wait( pushed );
         ASSERT( syncFiles.size() >= 1 && syncFiles.size() <= 2 );
         TEST(2==syncFiles.size()); // push spans both files
+        wait( pushed );

         delete pageMem;
         pageMem = 0;
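
Aside (not part of the diff): the hunks above implement the release note "TLogs will replace a large file with an empty file rather than doing a large truncate operation" — when the truncate needed to shrink the idle queue file would exceed DISK_QUEUE_MAX_TRUNCATE_BYTES, the file is replaced and (on non-Windows platforms, per the #else branch above) the old one is truncated away incrementally. A standalone C++ sketch of that decision rule, using assumed example sizes rather than the real knob plumbing:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // Assumed example values; in FoundationDB these come from ServerKnobs and file state.
    const int64_t fileShrinkBytes = int64_t(100) << 20;   // minimum shrink step
    const int64_t maxTruncateBytes = int64_t(2) << 30;    // DISK_QUEUE_MAX_TRUNCATE_BYTES in the diff
    const int64_t fileSize = int64_t(5) << 30;            // current size of the idle queue file
    const int64_t desiredMaxFileSize = int64_t(1) << 30;  // how much we want to keep around

    const int64_t maxShrink = std::max(fileSize - desiredMaxFileSize, fileShrinkBytes);
    if (maxShrink > maxTruncateBytes) {
        // One huge truncate can stall the disk for a long time, so swap in a fresh
        // file instead and let the old one be truncated away in small steps.
        std::printf("replace file (elided truncate of %lld bytes)\n", (long long)maxShrink);
    } else {
        // Small enough: shrink the file in place.
        std::printf("truncate in place to %lld bytes\n", (long long)std::max(fileSize - maxShrink, int64_t(0)));
    }
    return 0;
}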


@@ -75,7 +75,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
     init( TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH, 16<<10 ); if ( randomize && BUGGIFY ) TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH = 500;
     init( DISK_QUEUE_FILE_EXTENSION_BYTES, 10<<20 ); // BUGGIFYd per file within the DiskQueue
     init( DISK_QUEUE_FILE_SHRINK_BYTES, 100<<20 ); // BUGGIFYd per file within the DiskQueue
-    init( DISK_QUEUE_MAX_TRUNCATE_EXTENTS, 1<<10 ); if ( randomize && BUGGIFY ) DISK_QUEUE_MAX_TRUNCATE_EXTENTS = 0;
+    init( DISK_QUEUE_MAX_TRUNCATE_BYTES, 2<<30 ); if ( randomize && BUGGIFY ) DISK_QUEUE_MAX_TRUNCATE_BYTES = 0;
     init( TLOG_DEGRADED_DELAY_COUNT, 5 );
     init( TLOG_DEGRADED_DURATION, 5.0 );


@@ -79,7 +79,7 @@ public:
     int64_t TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH;
     int64_t DISK_QUEUE_FILE_EXTENSION_BYTES; // When we grow the disk queue, by how many bytes should it grow?
     int64_t DISK_QUEUE_FILE_SHRINK_BYTES; // When we shrink the disk queue, by how many bytes should it shrink?
-    int DISK_QUEUE_MAX_TRUNCATE_EXTENTS;
+    int DISK_QUEUE_MAX_TRUNCATE_BYTES; // A truncate larger than this will cause the file to be replaced instead.
     int TLOG_DEGRADED_DELAY_COUNT;
     double TLOG_DEGRADED_DURATION;


@@ -32,7 +32,7 @@
 <Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
     <Product Name='$(var.Title)'
-             Id='{32F74616-4B66-4A17-972F-765FF2C03728}'
+             Id='{92B71EC3-E4A1-4473-8D72-7A4D61435C8F}'
              UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
              Version='$(var.Version)'
              Manufacturer='$(var.Manufacturer)'