Rename knobs and clean up some trace events. (#11135)
This commit is contained in:
parent 2329e8327a
commit 55d88c17c9

Summary: the sharded RocksDB storage engine gets dedicated SHARDED_ROCKSDB_* knobs in place of the shared ROCKSDB_* ones it previously read, and the write-stall trace event is trimmed so it only fires when writes are actually stalled.
```diff
@@ -568,6 +568,9 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 	init( SHARDED_ROCKSDB_TARGET_FILE_SIZE_MULTIPLIER, 1 ); // RocksDB default.
+	init( SHARDED_ROCKSDB_SUGGEST_COMPACT_CLEAR_RANGE, true );
 	init( SHARDED_ROCKSDB_MAX_BACKGROUND_JOBS, 4 );
 	init( SHARDED_ROCKSDB_BLOCK_CACHE_SIZE, isSimulated? 16 * 1024 : 134217728 /* 128MB */);
+	// Set to 0 to disable rocksdb write rate limiting. Rate limiter unit: bytes per second.
+	init( SHARDED_ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC, 33554432 );
 
 	// Leader election
 	bool longLeaderElection = randomize && BUGGIFY;
```
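For orientation, here is a minimal sketch (not FDB source; the `Knobs` struct and helper name are illustrative stand-ins for `ServerKnobs`) of how knob values like these typically land on a `rocksdb::Options` object when the sharded engine opens an instance:

```cpp
#include <cstdint>
#include <rocksdb/cache.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

// Illustrative stand-in for the SHARDED_ROCKSDB_* server knobs above.
struct Knobs {
	int SHARDED_ROCKSDB_TARGET_FILE_SIZE_MULTIPLIER = 1;
	int SHARDED_ROCKSDB_MAX_BACKGROUND_JOBS = 4;
	int64_t SHARDED_ROCKSDB_BLOCK_CACHE_SIZE = 134217728; // 128MB
};

// Hypothetical helper: not part of this commit, just the usual shape of
// knob-to-options plumbing in a RocksDB-backed storage engine.
rocksdb::Options makeOptions(const Knobs& knobs) {
	rocksdb::Options options;
	options.max_background_jobs = knobs.SHARDED_ROCKSDB_MAX_BACKGROUND_JOBS;
	options.target_file_size_multiplier = knobs.SHARDED_ROCKSDB_TARGET_FILE_SIZE_MULTIPLIER;

	rocksdb::BlockBasedTableOptions bbOpts;
	if (knobs.SHARDED_ROCKSDB_BLOCK_CACHE_SIZE > 0) {
		bbOpts.block_cache = rocksdb::NewLRUCache(knobs.SHARDED_ROCKSDB_BLOCK_CACHE_SIZE);
	}
	options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbOpts));
	return options;
}
```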
```diff
@@ -524,6 +524,8 @@ public:
 	int SHARDED_ROCKSDB_TARGET_FILE_SIZE_MULTIPLIER;
+	bool SHARDED_ROCKSDB_SUGGEST_COMPACT_CLEAR_RANGE;
 	int SHARDED_ROCKSDB_MAX_BACKGROUND_JOBS;
 	int64_t SHARDED_ROCKSDB_BLOCK_CACHE_SIZE;
+	int64_t SHARDED_ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC;
 
 	// Leader election
 	int MAX_NOTIFICATIONS;
```
```diff
@@ -141,11 +141,12 @@ public:
 	void OnStallConditionsChanged(const rocksdb::WriteStallInfo& info) override {
 		auto curState = getWriteStallState(info.condition.cur);
 		auto prevState = getWriteStallState(info.condition.prev);
-		auto severity = curState == 1 ? SevWarnAlways : SevInfo;
-		TraceEvent(severity, "WriteStallInfo", logId)
-		    .detail("CF", info.cf_name)
-		    .detail("CurrentState", curState)
-		    .detail("PrevState", prevState);
+		if (curState == 1) {
+			TraceEvent(SevWarn, "WriteStallInfo", logId)
+			    .detail("CF", info.cf_name)
+			    .detail("CurrentState", curState)
+			    .detail("PrevState", prevState);
+		}
 	}
 
 	// Flush reason code:
```
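Context for this hunk: `OnStallConditionsChanged` overrides a hook on `rocksdb::EventListener`, which RocksDB calls from its background threads whenever a column family's write-stall condition changes. A minimal sketch of the mechanism, assuming a state mapping where 1 means stalled (the mapping and class name are illustrative, not the FDB code):

```cpp
#include <memory>
#include <rocksdb/listener.h>
#include <rocksdb/options.h>

class StallListener : public rocksdb::EventListener {
public:
	void OnStallConditionsChanged(const rocksdb::WriteStallInfo& info) override {
		// info.cf_name names the column family; info.condition carries the
		// previous and current rocksdb::WriteStallCondition.
		if (toState(info.condition.cur) == 1) {
			// The FDB code emits TraceEvent(SevWarn, "WriteStallInfo", ...) here.
		}
	}

private:
	// Assumed mapping: 1 == writes stopped. The real getWriteStallState may
	// encode the conditions differently.
	static int toState(rocksdb::WriteStallCondition c) {
		switch (c) {
		case rocksdb::WriteStallCondition::kStopped:
			return 1;
		case rocksdb::WriteStallCondition::kDelayed:
			return 0;
		default: // kNormal
			return -1;
		}
	}
};

// Listeners are registered on the options used to open the database.
void attachListener(rocksdb::Options& options) {
	options.listeners.push_back(std::make_shared<StallListener>());
}
```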
```diff
@@ -749,8 +750,8 @@ rocksdb::ColumnFamilyOptions getCFOptions() {
 		bbOpts.whole_key_filtering = false;
 	}
 
-	if (rocksdb_block_cache == nullptr && SERVER_KNOBS->ROCKSDB_BLOCK_CACHE_SIZE > 0) {
-		rocksdb_block_cache = rocksdb::NewLRUCache(SERVER_KNOBS->ROCKSDB_BLOCK_CACHE_SIZE);
+	if (rocksdb_block_cache == nullptr && SERVER_KNOBS->SHARDED_ROCKSDB_BLOCK_CACHE_SIZE > 0) {
+		rocksdb_block_cache = rocksdb::NewLRUCache(SERVER_KNOBS->SHARDED_ROCKSDB_BLOCK_CACHE_SIZE);
 	}
 	bbOpts.block_cache = rocksdb_block_cache;
```
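`rocksdb_block_cache` here is a process-wide cache: it is created once and then attached to every column family's table options, so total block-cache memory stays bounded by the knob no matter how many physical shards are open. A hedged sketch of that sharing pattern (names are illustrative):

```cpp
#include <cstdint>
#include <memory>
#include <rocksdb/cache.h>
#include <rocksdb/table.h>

// One LRU cache for the whole process, lazily created on first use.
static std::shared_ptr<rocksdb::Cache> g_blockCache;

rocksdb::BlockBasedTableOptions makeTableOptions(int64_t cacheBytes) {
	rocksdb::BlockBasedTableOptions bbOpts;
	if (g_blockCache == nullptr && cacheBytes > 0) {
		g_blockCache = rocksdb::NewLRUCache(cacheBytes);
	}
	bbOpts.block_cache = g_blockCache; // shared across all column families
	return bbOpts;
}
```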
```diff
@@ -1274,7 +1275,7 @@ public:
 		const double start = now();
 		// Open instance.
 		TraceEvent(SevInfo, "ShardedRocksDBInitBegin", this->logId).detail("DataPath", path);
-		if (SERVER_KNOBS->ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC > 0) {
+		if (SERVER_KNOBS->SHARDED_ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC > 0) {
 			// Set rate limiter to a higher rate to avoid blocking storage engine initialization.
 			auto rateLimiter = rocksdb::NewGenericRateLimiter((int64_t)5 << 30, // 5GB
 			                                                  100 * 1000, // refill_period_us
```
```diff
@@ -1433,8 +1434,8 @@ public:
 		writeBatch = std::make_unique<rocksdb::WriteBatch>();
 		dirtyShards = std::make_unique<std::set<PhysicalShard*>>();
 
-		if (SERVER_KNOBS->ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC > 0) {
-			dbOptions.rate_limiter->SetBytesPerSecond(SERVER_KNOBS->ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC);
+		if (SERVER_KNOBS->SHARDED_ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC > 0) {
+			dbOptions.rate_limiter->SetBytesPerSecond(SERVER_KNOBS->SHARDED_ROCKSDB_WRITE_RATE_LIMITER_BYTES_PER_SEC);
 		}
 		TraceEvent(SevInfo, "ShardedRocksDBInitEnd", this->logId)
 		    .detail("DataPath", path)
```
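Taken together, the previous hunk and this one implement a two-phase rate limit: open with a deliberately high budget (5GB/s) so compactions and flushes during recovery are not throttled, then settle to the configured knob value once initialization finishes. A sketch against RocksDB's rate-limiter API (the 5GB figure and refill period come from the diff; the helper names are illustrative):

```cpp
#include <cstdint>
#include <rocksdb/options.h>
#include <rocksdb/rate_limiter.h>

// Phase 1: install a generous limiter before opening the database.
void installRateLimiter(rocksdb::DBOptions& dbOptions, int64_t knobBytesPerSec) {
	if (knobBytesPerSec > 0) {
		dbOptions.rate_limiter.reset(rocksdb::NewGenericRateLimiter(
		    (int64_t)5 << 30, // rate_bytes_per_sec: 5GB, high on purpose
		    100 * 1000, // refill_period_us
		    10)); // fairness (RocksDB's default)
	}
}

// Phase 2: after initialization, lower the budget to the knob value. The
// limiter is adjustable at runtime, so no reopen is needed.
void settleRateLimiter(rocksdb::DBOptions& dbOptions, int64_t knobBytesPerSec) {
	if (knobBytesPerSec > 0 && dbOptions.rate_limiter != nullptr) {
		dbOptions.rate_limiter->SetBytesPerSecond(knobBytesPerSec);
	}
}
```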
```diff
@@ -2891,7 +2892,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
 			}
 		}
 
-		if (SERVER_KNOBS->ROCKSDB_SUGGEST_COMPACT_CLEAR_RANGE) {
+		if (SERVER_KNOBS->SHARDED_ROCKSDB_SUGGEST_COMPACT_CLEAR_RANGE) {
 			for (const auto& [id, range] : deletes) {
 				auto cf = columnFamilyMap->find(id);
 				ASSERT(cf != columnFamilyMap->end());
```
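`SuggestCompactRange` differs from `CompactRange`: it only marks the files overlapping the range as candidates, leaving the actual work to the background compaction scheduler, which makes it cheap enough to issue after every large clear. A minimal sketch of the pattern this knob gates (function name is illustrative):

```cpp
#include <rocksdb/db.h>

// After a range deletion, nudge RocksDB to compact the overlapping files so
// tombstones and dead data are reclaimed sooner.
rocksdb::Status clearAndSuggestCompact(rocksdb::DB* db,
                                       rocksdb::ColumnFamilyHandle* cf,
                                       const rocksdb::Slice& begin,
                                       const rocksdb::Slice& end) {
	rocksdb::Status s = db->DeleteRange(rocksdb::WriteOptions(), cf, begin, end);
	if (!s.ok()) {
		return s;
	}
	// Non-blocking: only flags files for compaction.
	return db->SuggestCompactRange(cf, &begin, &end);
}
```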