Fix external timeout with ShardedRocksDB and re-enable ShardedRocksDB in simulation tests (#11638)
* Speed up ShardedRocksDB in simulation
* Re-enable ShardedRocksDB and disable physical shard move
parent b4a2fba52e
commit 5ee0db13e6
@@ -191,8 +191,8 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 init( MAX_LARGE_SHARD_BYTES, 1000000000 ); // 1G
 init( SHARD_ENCODE_LOCATION_METADATA, false ); if( randomize && BUGGIFY ) SHARD_ENCODE_LOCATION_METADATA = true;
 init( ENABLE_DD_PHYSICAL_SHARD, false ); // EXPERIMENTAL; If true, SHARD_ENCODE_LOCATION_METADATA must be true; When true, optimization of data move between DCs is disabled
-init( DD_PHYSICAL_SHARD_MOVE_PROBABILITY, 0.0 ); if( isSimulated ) DD_PHYSICAL_SHARD_MOVE_PROBABILITY = 0.5;
-init( ENABLE_PHYSICAL_SHARD_MOVE_EXPERIMENT, false ); if( isSimulated ) ENABLE_PHYSICAL_SHARD_MOVE_EXPERIMENT = deterministicRandom()->coinflip();
+init( DD_PHYSICAL_SHARD_MOVE_PROBABILITY, 0.0 ); // FIXME: re-enable after ShardedRocksDB is well tested by simulation
+init( ENABLE_PHYSICAL_SHARD_MOVE_EXPERIMENT, false ); // FIXME: re-enable after ShardedRocksDB is well tested by simulation
 init( MAX_PHYSICAL_SHARD_BYTES, 10000000 ); // 10 MB; for ENABLE_DD_PHYSICAL_SHARD; smaller leads to larger number of physicalShard per storage server
 init( PHYSICAL_SHARD_METRICS_DELAY, 300.0 ); // 300 seconds; for ENABLE_DD_PHYSICAL_SHARD
 init( ANONYMOUS_PHYSICAL_SHARD_TRANSITION_TIME, 600.0 ); if( randomize && BUGGIFY ) ANONYMOUS_PHYSICAL_SHARD_TRANSITION_TIME = 0.0; // 600 seconds; for ENABLE_DD_PHYSICAL_SHARD
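Before this change, simulation randomized the physical shard move knobs on every run (a 0.5 move probability and a coin flip for the experiment flag); the commit pins both to their release defaults until ShardedRocksDB has more simulation coverage. A self-contained sketch of the effective before/after defaulting logic (names and structure are illustrative only, not FDB's actual knob framework):

#include <random>

// Illustrative sketch of the defaulting behavior described above; not FDB code.
struct PhysicalShardMoveKnobs {
    double ddPhysicalShardMoveProbability = 0.0;    // DD_PHYSICAL_SHARD_MOVE_PROBABILITY
    bool enablePhysicalShardMoveExperiment = false; // ENABLE_PHYSICAL_SHARD_MOVE_EXPERIMENT
};

PhysicalShardMoveKnobs initKnobs(bool isSimulated, bool beforeThisCommit, std::mt19937& rng) {
    PhysicalShardMoveKnobs k; // release defaults: 0.0 and false
    if (isSimulated && beforeThisCommit) {
        // Old behavior: simulation randomized both knobs each run.
        k.ddPhysicalShardMoveProbability = 0.5;
        k.enablePhysicalShardMoveExperiment = std::bernoulli_distribution(0.5)(rng);
    }
    // New behavior: keep the release defaults even in simulation
    // (see the FIXME comments in the hunk above).
    return k;
}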
@@ -210,7 +210,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 bool buggifySmallShards = randomize && BUGGIFY;
 bool simulationMediumShards = !buggifySmallShards && isSimulated && randomize && !BUGGIFY; // prefer smaller shards in simulation
 // FIXME: increase this even more eventually
-init( MIN_SHARD_BYTES, 10000000 ); if( buggifySmallShards ) MIN_SHARD_BYTES = 40000; if (simulationMediumShards) MIN_SHARD_BYTES = 200000; //FIXME: data distribution tracker (specifically StorageMetrics) relies on this number being larger than the maximum size of a key value pair
+init( MIN_SHARD_BYTES, 10000000 ); if( buggifySmallShards ) MIN_SHARD_BYTES = 400000; if (simulationMediumShards) MIN_SHARD_BYTES = 2000000; //FIXME: data distribution tracker (specifically StorageMetrics) relies on this number being larger than the maximum size of a key value pair
 init( SHARD_BYTES_RATIO, 4 );
 init( SHARD_BYTES_PER_SQRT_BYTES, 45 ); if( buggifySmallShards ) SHARD_BYTES_PER_SQRT_BYTES = 0;//Approximately 10000 bytes per shard
 init( MAX_SHARD_BYTES, 500000000 );

@@ -236,7 +236,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 */
 init( SHARD_MAX_BYTES_READ_PER_KSEC_JITTER, 0.1 );
 bool buggifySmallBandwidthSplit = randomize && BUGGIFY;
-init( SHARD_MAX_BYTES_PER_KSEC, 1LL*1000000*1000 ); if( buggifySmallBandwidthSplit ) SHARD_MAX_BYTES_PER_KSEC = 10LL*1000*1000;
+init( SHARD_MAX_BYTES_PER_KSEC, 1LL*1000000*1000 ); if( buggifySmallBandwidthSplit ) SHARD_MAX_BYTES_PER_KSEC = 1LL*100000*1000;
 /* 1*1MB/sec * 1000sec/ksec
 Shards with more than this bandwidth will be split immediately.
 For a large shard (100MB), it will be split into multiple shards with sizes < SHARD_SPLIT_BYTES_PER_KSEC;

@@ -247,7 +247,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 team indefinitely, limiting performance.
 */

-init( SHARD_MIN_BYTES_PER_KSEC, 100 * 1000 * 1000 ); if( buggifySmallBandwidthSplit ) SHARD_MIN_BYTES_PER_KSEC = 200*1*1000;
+init( SHARD_MIN_BYTES_PER_KSEC, 100 * 1000 * 1000 ); if( buggifySmallBandwidthSplit ) SHARD_MIN_BYTES_PER_KSEC = 20*1000*1000;
 /* 100*1KB/sec * 1000sec/ksec
 Shards with more than this bandwidth will not be merged.
 Obviously this needs to be significantly less than SHARD_MAX_BYTES_PER_KSEC, else we will repeatedly merge and split.
@@ -598,8 +598,8 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 init( SHARDED_ROCKSDB_COMPACTION_PERIOD, isSimulated? 3600 : 2592000 ); // 30d
 init( SHARDED_ROCKSDB_COMPACTION_ACTOR_DELAY, 3600 ); // 1h
 init( SHARDED_ROCKSDB_COMPACTION_SHARD_LIMIT, -1 );
-init( SHARDED_ROCKSDB_WRITE_BUFFER_SIZE, 16 << 20 ); // 16MB
-init( SHARDED_ROCKSDB_TOTAL_WRITE_BUFFER_SIZE, 1 << 30 ); // 1GB
+init( SHARDED_ROCKSDB_WRITE_BUFFER_SIZE, (isSimulated && !buggifySmallShards && !buggifySmallBandwidthSplit && !simulationMediumShards) ? 128 << 20 : 16 << 20 ); // 16MB
+init( SHARDED_ROCKSDB_TOTAL_WRITE_BUFFER_SIZE, isSimulated ? 0 : 1 << 30 ); // 1GB
 init( SHARDED_ROCKSDB_MEMTABLE_BUDGET, 64 << 20 ); // 64MB
 init( SHARDED_ROCKSDB_MAX_WRITE_BUFFER_NUMBER, 6 ); // RocksDB default.
 init( SHARDED_ROCKSDB_TARGET_FILE_SIZE_BASE, 16 << 20 ); // 16MB
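For context on the two write-buffer knobs changed above: in RocksDB, write_buffer_size bounds a single memtable per column family, while db_write_buffer_size bounds total memtable memory across the database, and a value of 0 disables that global cap. A minimal sketch of how such knob values could be applied to rocksdb::Options (the wiring shown here is an assumption for illustration, not the ShardedRocksDB implementation):

#include <rocksdb/options.h>

// Illustrative only: apply the two sizing knobs to a RocksDB options object.
// writeBufferSize  ~ SHARDED_ROCKSDB_WRITE_BUFFER_SIZE (per-memtable cap)
// totalWriteBuffer ~ SHARDED_ROCKSDB_TOTAL_WRITE_BUFFER_SIZE (global cap; 0 = uncapped)
rocksdb::Options makeShardOptions(size_t writeBufferSize, size_t totalWriteBuffer) {
    rocksdb::Options options;
    options.write_buffer_size = writeBufferSize;     // e.g. 128 << 20 in simulation per the diff
    options.db_write_buffer_size = totalWriteBuffer; // 0 in simulation per the diff
    return options;
}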
@@ -615,7 +615,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 init( SHARDED_ROCKSDB_LEVEL0_FILENUM_COMPACTION_TRIGGER, 4 );
 init( SHARDED_ROCKSDB_LEVEL0_SLOWDOWN_WRITES_TRIGGER, 20 ); // RocksDB default.
 init( SHARDED_ROCKSDB_LEVEL0_STOP_WRITES_TRIGGER, 36 ); // RocksDB default.
-init( SHARDED_ROCKSDB_DELAY_COMPACTION_FOR_DATA_MOVE, false ); if (isSimulated) SHARDED_ROCKSDB_DELAY_COMPACTION_FOR_DATA_MOVE = deterministicRandom()->coinflip();
+init( SHARDED_ROCKSDB_DELAY_COMPACTION_FOR_DATA_MOVE, false ); if (isSimulated) SHARDED_ROCKSDB_DELAY_COMPACTION_FOR_DATA_MOVE = true;
 init( SHARDED_ROCKSDB_MAX_OPEN_FILES, 50000 ); // Should be smaller than OS's fd limit.
 init (SHARDED_ROCKSDB_READ_ASYNC_IO, false ); if (isSimulated) SHARDED_ROCKSDB_READ_ASYNC_IO = deterministicRandom()->coinflip();
 init( SHARDED_ROCKSDB_PREFIX_LEN, 0 ); if( randomize && BUGGIFY ) SHARDED_ROCKSDB_PREFIX_LEN = deterministicRandom()->randomInt(1, 20);
@@ -577,6 +577,11 @@ rocksdb::ColumnFamilyOptions getCFOptions() {
 }

 rocksdb::ColumnFamilyOptions getCFOptionsForInactiveShard() {
+	if (g_network->isSimulated()) {
+		return getCFOptions();
+	} else {
+		ASSERT(false); // FIXME: remove when SHARDED_ROCKSDB_DELAY_COMPACTION_FOR_DATA_MOVE feature is well tuned
+	}
 	auto options = getCFOptions();
 	// never slowdown ingest.
 	options.level0_file_num_compaction_trigger = (1 << 30);
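getCFOptionsForInactiveShard() above starts from the regular column family options and raises the L0 compaction trigger so that ingest into a not-yet-active shard is never throttled (with simulation currently falling back to the plain options). A standalone sketch of that technique on raw RocksDB column family options; only the level0_file_num_compaction_trigger line comes from the diff, the other two triggers are assumed companions:

#include <rocksdb/options.h>

// Illustrative sketch: column family options for bulk ingest into an inactive shard,
// tuned so L0 buildup never slows or stops writes during the data move.
rocksdb::ColumnFamilyOptions inactiveShardCFOptions(rocksdb::ColumnFamilyOptions options) {
    // From the diff above: effectively never trigger automatic L0 compaction.
    options.level0_file_num_compaction_trigger = 1 << 30;
    // Assumed companions (not from this commit): push the write slowdown/stop
    // thresholds out of reach as well.
    options.level0_slowdown_writes_trigger = 1 << 30;
    options.level0_stop_writes_trigger = 1 << 30;
    return options;
}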
@@ -2,7 +2,6 @@
 extraDatabaseMode = 'LocalOrSingle'
 # DR is not currently supported in required tenant mode
 tenantModes = ['disabled', 'optional']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BackupAndRestore'

@@ -2,8 +2,6 @@
 allowDefaultTenant = false
 tenantModes = ['optional', 'required']
 blobGranulesEnabled = true
-# FIXME: re-enable rocks at some point
-storageEngineExcludeTypes = [5]

 [[knobs]]
 audit_logging_enabled = false

@@ -1,7 +1,5 @@
 testClass = "Backup"

-[configuration]
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BackupAndRestore'

@@ -4,7 +4,6 @@ testClass = "Backup"
 extraDatabaseMode = 'LocalOrSingle'
 # DR is not currently supported in required tenant mode
 tenantModes = ['disabled', 'optional']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BackupAndRestore'

@@ -4,7 +4,6 @@ testClass = "Backup"
 extraDatabaseMode = 'LocalOrSingle'
 # DR is not currently supported in required tenant mode
 tenantModes = ['disabled', 'optional']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BackupAndRestore'

@@ -4,11 +4,9 @@ blobGranulesEnabled = true
 extraDatabaseMode = 'Single'
 allowDefaultTenant = false
 disableTss = true
-storageEngineExcludeTypes = [5]

 [[knobs]]
 bg_consistency_check_enabled = 0
-shard_encode_location_metadata = false
 bw_throttling_enabled = false
 blob_restore_skip_empty_ranges = false


@@ -4,11 +4,9 @@ blobGranulesEnabled = true
 extraDatabaseMode = 'Single'
 allowDefaultTenant = false
 disableTss = true
-storageEngineExcludeTypes = [5]

 [[knobs]]
 bg_consistency_check_enabled = 0
-shard_encode_location_metadata = false
 bw_throttling_enabled = false
 blob_restore_skip_empty_ranges = false

@@ -6,14 +6,12 @@ tenantModes = ['optional', 'required']
 injectTargetedSSRestart = true
 injectSSDelay = true
 disableTss = true
-storageEngineExcludeTypes = [5]

 [[knobs]]
 bg_metadata_source = "tenant"
 bg_key_tuple_truncate_offset = 1
 enable_rest_kms_communication = true
 bg_consistency_check_enabled = 0
-shard_encode_location_metadata = false
 bw_throttling_enabled = false

 [[test]]

@@ -4,11 +4,9 @@ blobGranulesEnabled = true
 extraDatabaseMode = 'Single'
 allowDefaultTenant = false
 disableTss = true
-storageEngineExcludeTypes = [5]

 [[knobs]]
 bg_consistency_check_enabled = 0
-shard_encode_location_metadata = false
 bw_throttling_enabled = false
 blob_restore_skip_empty_ranges = false


@@ -1,7 +1,6 @@
 [configuration]
 allowDefaultTenant = false
 tenantModes = ['optional', 'required']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'TenantCreation'

@@ -4,7 +4,6 @@ disableTss = true # There's no TSS in MGS this prevent the DD operate TSS mappin

 [[knobs]]
 max_added_sources_multiplier = 0 # set to 0 because it's impossible to make sure SS and mock SS will finish fetch keys at the same time.
-shard_encode_location_metadata = false

 [[test]]
 testTitle = 'IDDTxnProcessorRawStartMovement'

@@ -1,6 +1,5 @@
 [configuration]
 minimumRegions = 2
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'KillRegionCycle'

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'MemoryLifetimeTest'

@@ -3,7 +3,6 @@ testClass = 'MockDD'

 [[knobs]]
 enable_dd_physical_shard = false
-shard_encode_location_metadata = false
 dd_tenant_awareness_enabled = false
 storage_quota_enabled = false


@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'MutationLogReaderCorrectness'
 useDB = true

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'StreamingRangeReadTest'


@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'WatchesTest'


@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'RandomReadWriteTest'
 simCheckRelocationDuration = true

@@ -5,7 +5,6 @@ machineCount = 20
 commitProxyCount = 4
 config = 'triple'
 desiredTLogCount = 6
-storageEngineExcludeTypes = [5]

 [[knobs]]
 enable_worker_health_monitor = true

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'ApiCorrectnessTest'
 clearAfterTest = true

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'ApiCorrectnessTest'
 clearAfterTest = true

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'ApiCorrectnessTest'
 clearAfterTest = true
@@ -2,9 +2,6 @@
 # Priority 1.0 is the default in TestHarness2
 testPriority = '20'

-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle='ThrottlingTest'
 [[test.workload]]

@@ -2,9 +2,6 @@
 # Priority 1.0 is the default in TestHarness2
 testPriority = '20'

-[configuration]
-storageEngineExcludeTypes=[5]
-
 [[test]]
 testTitle = 'TransactionCostTest'


@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'TransactionTagWithApiCorrectness'
 clearAfterTest = true

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'TransactionTagWithSwizzledApiCorrectnessTest'
 clearAfterTest = true

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'withoutWriteThrottling'


@@ -6,8 +6,6 @@ blobGranulesEnabled = true
 allowDefaultTenant = false
 injectTargetedSSRestart = true
 injectSSDelay = true
-# FIXME: re-enable rocks at some point
-storageEngineExcludeTypes = [5]
 tenantModes = ['disabled']

 [[knobs]]

@@ -6,8 +6,6 @@ blobGranulesEnabled = true
 allowDefaultTenant = false
 injectTargetedSSRestart = true
 injectSSDelay = true
-# FIXME: re-enable rocks at some point
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BlobGranuleRestartCycle'
@@ -6,8 +6,6 @@ blobGranulesEnabled = true
 allowDefaultTenant = false
 injectTargetedSSRestart = true
 injectSSDelay = true
-# FIXME: re-enable rocks at some point
-storageEngineExcludeTypes = [5]
 tenantModes = ['disabled']

 [[knobs]]

@@ -6,8 +6,6 @@ blobGranulesEnabled = true
 allowDefaultTenant = false
 injectTargetedSSRestart = true
 injectSSDelay = true
-# FIXME: re-enable rocks at some point
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BlobGranuleRestartLarge'

@@ -1,5 +1,5 @@
 [configuration]
-storageEngineExcludeTypes=[3,5]
+storageEngineExcludeTypes=[3]
 tenantModes=['disabled']

 [[test]]

@@ -2,14 +2,12 @@
 extraMachineCountDC = 2
 maxTLogVersion=6
 disableHostname=true
-storageEngineExcludeTypes=[3, 5]
+storageEngineExcludeTypes=[3]
 tenantModes=['disabled']
 encryptModes=['disabled']
 simHTTPServerEnabled=false

 [[knobs]]
-# This can be removed once the lower bound of this downgrade test is a version that understands the new protocol
-shard_encode_location_metadata = false
 # Mutation checksum and accumulative checksum is not compatible with release-7.1.x
 enable_mutation_checksum = false
 enable_accumulative_checksum = false

@@ -1,5 +1,5 @@
 [configuration]
-storageEngineExcludeTypes = [3, 5]
+storageEngineExcludeTypes = [3]
 maxTLogVersion = 6
 disableTss = true
 disableHostname = true

@@ -8,8 +8,6 @@ tenantModes=['disabled']
 simHTTPServerEnabled=false

 [[knobs]]
-# This can be removed once the lower bound of this downgrade test is a version that understands the new protocol
-shard_encode_location_metadata = false
 # Mutation checksum and accumulative checksum is not compatible with release-7.1.x
 enable_mutation_checksum = false
 enable_accumulative_checksum = false
@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'ApiCorrectnessTest'
 clearAfterTest = true

@@ -1,7 +1,6 @@
 [configuration]
 # Tenant lookups fail during the atomic restore because they aren't affected by locking
 allowDefaultTenant = false
-storageEngineExcludeTypes = [5]

 [[knobs]]
 rocksdb_read_value_timeout=300.0

@@ -2,7 +2,6 @@
 extraDatabaseMode = 'Single'
 # required tenant mode is not supported for Disaster Recovery yet
 tenantModes = ['disabled', 'optional']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'ApiCorrectnessTest'

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[knobs]]
 enable_replica_consistency_check_on_reads = true


@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'CloggedCycleTest'


@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'DDBalance_Test'


@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes=[5]
-
 [[test]]
 testTitle = 'DDBalance_Test'


@@ -5,7 +5,6 @@ machineCount = 20
 minimumRegions = 2
 coordinators = 1
 remoteConfig = 'remote_double'
-storageEngineExcludeTypes = [5]

 [[knobs]]
 max_write_transaction_life_versions = 5000000
@@ -1,6 +1,5 @@
 [configuration]
 allowDefaultTenant = false
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BackupAndParallelRestoreWithAtomicOp'

@@ -1,7 +1,6 @@
 [configuration]
 # tenants are not supported with parallel restore
 allowDefaultTenant = false
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BackupAndParallelRestoreWithAtomicOp'

@@ -2,7 +2,6 @@
 extraDatabaseMode = 'LocalOrSingle'
 # DR is not currently supported in required tenant mode
 tenantModes = ['disabled', 'optional']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'BackupAndRestore'

@@ -2,7 +2,6 @@
 extraDatabaseMode = 'Single'
 # required tenant mode is not supported for Disaster Recovery yet
 tenantModes = ['disabled', 'optional']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'SharedDefaultBackupToFileThenDB'

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'ApiCorrectnessTest'
 clearAfterTest = true

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'DDBalance_Test'


@@ -2,7 +2,6 @@
 extraDatabaseMode = 'Single'
 # required tenant mode is not supported for Disaster Recovery yet
 tenantModes = ['disabled', 'optional']
-storageEngineExcludeTypes = [5]

 [[test]]
 testTitle = 'VersionStampBackupToDB'

@@ -1,6 +1,3 @@
-[configuration]
-storageEngineExcludeTypes = [5]
-
 [[test]]
 testTitle = 'DDBalance_Test'
