revert changes inside KVStoreTest, ready for code review
This commit is contained in:
parent f597aa7e18
commit 6836e370a7
@@ -51,18 +51,18 @@ public:
// The total size of the returned value (less the last entry) will be less than byteLimit
virtual Future<Standalone<VectorRef<KeyValueRef>>> readRange( KeyRangeRef keys, int rowLimit = 1<<30, int byteLimit = 1<<30 ) = 0;

// Returns the amount of free and total space for this store, in bytes
// To debug MEMORY_RADIXTREE type ONLY
// Returns (1) how many key & value pairs have been inserted (2) how many nodes have been created (3) how many
// key size is less than 12 bytes
virtual std::tuple<size_t, size_t, size_t> getSize() { return std::make_tuple(0, 0, 0); }

//Returns how many key & value pairs have been inserted and how many nodes have been created
// Returns the amount of free and total space for this store, in bytes
virtual StorageBytes getStorageBytes() = 0;

virtual void resyncLog() {}

virtual void enableSnapshot() {}

// For debug, print out detailed node info
virtual void printData() {}
/*
Concurrency contract
Causal consistency:
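Reviewer note (not part of the diff): the readRange() contract above is what drives the doCount loop further down in this change. A minimal paging sketch under that contract, with a hypothetical countAll() helper written in the same flow/ACTOR style:

    // Count every pair by paging readRange() in batches of 1000 rows.
    // countAll() is illustrative only; the real loop lives in testKVStoreMain().
    ACTOR Future<int64_t> countAll(IKeyValueStore* store) {
        state int64_t count = 0;
        state Key begin;
        loop {
            Standalone<VectorRef<KeyValueRef>> kv =
                wait(store->readRange(KeyRangeRef(begin, LiteralStringRef("\xff\xff\xff\xff")), 1000));
            count += kv.size();
            if (kv.size() < 1000) break; // a short batch means we reached the end
            begin = keyAfter(kv[kv.size() - 1].key); // resume just past the last returned key
        }
        return count;
    }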
@@ -32,26 +32,26 @@ class Histogram {
public:
Histogram(int minSamples = 100) : minSamples(minSamples) { reset(); }

void reset(){
void reset() {
N = 0;
samplingRate = 1.0;
sum = T();
sumSQ = T();
};

void addSample(const T& x){
if (!N){
void addSample(const T& x) {
if (!N) {
minSample = maxSample = x;
} else {
if (x < minSample) minSample = x;
if (maxSample < x) maxSample = x;
}
sum += x;
sumSQ += x*x;
sumSQ += x * x;
N++;
if (deterministicRandom()->random01() < samplingRate){
if (deterministicRandom()->random01() < samplingRate) {
samples.push_back(x);
if (samples.size() == minSamples * 2){
if (samples.size() == minSamples * 2) {
deterministicRandom()->randomShuffle(samples);
samples.resize(minSamples);
samplingRate /= 2;
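Reviewer note (not part of the diff): addSample() keeps the aggregates (N, sum, sumSQ, min, max) exact for every sample, but retains only a random subset for percentile estimates; once the buffer reaches 2 * minSamples entries it is shuffled, truncated back to minSamples, and samplingRate is halved. An illustrative use, assuming the flow deterministicRandom() source is available:

    Histogram<float> h;                    // default minSamples = 100, so roughly 100..200 retained samples
    for (int i = 0; i < 100000; i++)
        h.addSample(float(i));             // aggregates stay exact for all 100000 samples
    // h.mean(), h.min(), h.max() are exact; h.percentileEstimate(p) is computed
    // from the bounded reservoir, so it is an estimate rather than an exact quantile.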
@@ -60,24 +60,24 @@ public:
}
// void addHistogram(const Histrogram& h2);

T mean() const { return sum * (1.0 / N); } // exact
T mean() const { return sum * (1.0 / N); } // exact
const T& min() const { return minSample; }
const T& max() const { return maxSample; }
T stdDev() const {
if (!N) return T();
return sqrt((sumSQ * N - sum*sum) * (1.0 / (N*(N-1))));
return sqrt((sumSQ * N - sum * sum) * (1.0 / (N * (N - 1))));
}
T percentileEstimate(double p){
T percentileEstimate(double p) {
ASSERT(p <= 1 && p >= 0);
int size = samples.size();
if (!size) return T();
if (size == 1) return samples[0];
std::sort(samples.begin(), samples.end());
double fi = p * double(size-1);
int li = p * double(size-1);
if (li == size-1) return samples.back();
double alpha = fi-li;
return samples[li]*(1-alpha) + samples[li+1]*alpha;
double fi = p * double(size - 1);
int li = p * double(size - 1);
if (li == size - 1) return samples.back();
double alpha = fi - li;
return samples[li] * (1 - alpha) + samples[li + 1] * alpha;
}
T medianEstimate() { return percentileEstimate(0.5); }
uint64_t samplesCount() const { return N; }
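Reviewer note (not part of the diff): a worked pass through percentileEstimate(), assuming every sample is retained (true here because fewer than minSamples values are added):

    Histogram<float> h;
    for (int i = 0; i <= 10; i++) h.addSample(float(i)); // sorted samples: 0, 1, ..., 10
    // p = 0.95, size = 11: fi = 0.95 * 10 = 9.5, li = 9, alpha = 0.5,
    // so the estimate is samples[9] * 0.5 + samples[10] * 0.5 = 9.5.
    float p95 = h.percentileEstimate(0.95);
    float med = h.medianEstimate(); // p = 0.5 -> fi = 5.0, alpha = 0 -> samples[5] = 5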
@@ -104,17 +104,9 @@ struct KVTest {
bool dispose;

explicit KVTest(int nodeCount, bool dispose, int keyBytes)
: store(NULL),
dispose(dispose),
startVersion( Version(time(NULL)) << 30 ),
lastSet(startVersion), lastCommit(startVersion), lastDurable(startVersion),
nodeCount(nodeCount),
keyBytes(keyBytes)
{
}
~KVTest() {
close();
}
: store(NULL), dispose(dispose), startVersion(Version(time(NULL)) << 30), lastSet(startVersion),
lastCommit(startVersion), lastDurable(startVersion), nodeCount(nodeCount), keyBytes(keyBytes) {}
~KVTest() { close(); }
void close() {
if (store) {
TraceEvent("KVTestDestroy");
@@ -131,42 +123,42 @@ struct KVTest {
if (s == allSets.end()) return startVersion;
auto& sets = s->value;
auto it = sets.lastLessOrEqual(version);
return it!=sets.end() ? *it : startVersion;
return it != sets.end() ? *it : startVersion;
}
void set( KeyValueRef kv ) {
void set(KeyValueRef kv) {
store->set(kv);
auto s = allSets.find(kv.key);
if (s == allSets.end()) {
allSets.insert( MapPair<Key,IndexedSet<Version, NoMetric>>(Key(kv.key), IndexedSet<Version, NoMetric>()) );
allSets.insert(MapPair<Key, IndexedSet<Version, NoMetric>>(Key(kv.key), IndexedSet<Version, NoMetric>()));
s = allSets.find(kv.key);
}
s->value.insert( lastSet, NoMetric() );
s->value.insert(lastSet, NoMetric());
}

Key randomKey() { return makeKey( deterministicRandom()->randomInt(0,nodeCount) ); }
Key randomKey() { return makeKey(deterministicRandom()->randomInt(0, nodeCount)); }
Key makeKey(Version value) {
Key k;
((KeyRef&)k) = KeyRef( new (k.arena()) uint8_t[ keyBytes ], keyBytes );
memcpy( (uint8_t*)k.begin(), doubleToTestKey( value ).begin(), 16 );
memset( (uint8_t*)k.begin()+16, '.', keyBytes-16 );
((KeyRef&)k) = KeyRef(new (k.arena()) uint8_t[keyBytes], keyBytes);
memcpy((uint8_t*)k.begin(), doubleToTestKey(value).begin(), 16);
memset((uint8_t*)k.begin() + 16, '.', keyBytes - 16);
return k;
}
};

ACTOR Future<Void> testKVRead( KVTest* test, Key key, Histogram<float>* latency, PerfIntCounter* count ) {
//state Version s1 = test->lastCommit;
// state Version s1 = test->lastCommit;
state Version s2 = test->lastDurable;

state double begin = timer();
Optional<Value> val = wait( test->store->readValue(key) );
latency->addSample( timer() - begin );
Optional<Value> val = wait(test->store->readValue(key));
latency->addSample(timer() - begin);
++*count;
Version v = val.present() ? BinaryReader::fromStringRef<Version>( val.get(), Unversioned() ) : test->startVersion;
if (v < test->startVersion) v = test->startVersion; // ignore older data from the database
Version v = val.present() ? BinaryReader::fromStringRef<Version>(val.get(), Unversioned()) : test->startVersion;
if (v < test->startVersion) v = test->startVersion; // ignore older data from the database

//ASSERT( s1 <= v || test->get(key, s1)==v ); // Plan A
ASSERT( s2 <= v || test->get(key, s2)==v ); // Causal consistency
ASSERT( v <= test->lastCommit ); // read committed
// ASSERT( s1 <= v || test->get(key, s1)==v ); // Plan A
ASSERT(s2 <= v || test->get(key, s2) == v); // Causal consistency
ASSERT(v <= test->lastCommit); // read committed
// ASSERT( v <= test->lastSet ); // read uncommitted
return Void();
}
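Reviewer note (not part of the diff): the three asserts above can be read as one predicate. A hedged restatement with a hypothetical checkRead() helper (Version is the usual 64-bit FDB version type):

    // v:        version encoded in the value the read returned
    // s2:       lastDurable at the time the read was issued
    // expected: the newest version this test wrote to the key at or before s2
    bool checkRead(Version v, Version s2, Version lastCommit, Version expected) {
        bool causallyConsistent = (s2 <= v) || (expected == v); // read reflects everything durable at start
        bool readCommitted = (v <= lastCommit);                 // but nothing newer than the last commit
        return causallyConsistent && readCommitted;
    }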
@@ -174,21 +166,21 @@ ACTOR Future<Void> testKVRead( KVTest* test, Key key, Histogram<float>* latency,
ACTOR Future<Void> testKVReadSaturation( KVTest* test, Histogram<float>* latency, PerfIntCounter* count ) {
while (true) {
state double begin = timer();
Optional<Value> val = wait( test->store->readValue(test->randomKey()) );
latency->addSample( timer() - begin );
Optional<Value> val = wait(test->store->readValue(test->randomKey()));
latency->addSample(timer() - begin);
++*count;
wait(delay(0));
}
}

ACTOR Future<Void> testKVCommit( KVTest* test, Histogram<float>* latency, PerfIntCounter* count ) {
state Version v = test->lastSet;
state Version v = test->lastSet;
test->lastCommit = v;
state double begin = timer();
wait( test->store->commit() );
wait(test->store->commit());
++*count;
latency->addSample( timer() - begin );
test->lastDurable = std::max( test->lastDurable, v );
latency->addSample(timer() - begin);
test->lastDurable = std::max(test->lastDurable, v);
return Void();
}
@@ -206,41 +198,39 @@ struct KVStoreTestWorkload : TestWorkload {
double setupTook;
std::string storeType;

KVStoreTestWorkload( WorkloadContext const& wcx )
: TestWorkload(wcx), reads("Reads"), sets("Sets"), commits("Commits"), setupTook(0)
{
KVStoreTestWorkload(WorkloadContext const& wcx)
: TestWorkload(wcx), reads("Reads"), sets("Sets"), commits("Commits"), setupTook(0) {
enabled = !clientId; // only do this on the "first" client
testDuration = getOption( options, LiteralStringRef("testDuration"), 10.0 );
operationsPerSecond = getOption( options, LiteralStringRef("operationsPerSecond"), 100e3 );
commitFraction = getOption( options, LiteralStringRef("commitFraction"), .001 );
setFraction = getOption( options, LiteralStringRef("setFraction"), .1 );
nodeCount = getOption( options, LiteralStringRef("nodeCount"), 100000 );
keyBytes = getOption( options, LiteralStringRef("keyBytes"), 8 );
valueBytes = getOption( options, LiteralStringRef("valueBytes"), 8 );
doSetup = getOption( options, LiteralStringRef("setup"), false );
doClear = getOption( options, LiteralStringRef("clear"), false );
doCount = getOption( options, LiteralStringRef("count"), false );
filename = getOption( options, LiteralStringRef("filename"), Value() ).toString();
saturation = getOption( options, LiteralStringRef("saturation"), false );
storeType = getOption( options, LiteralStringRef("storeType"), LiteralStringRef("ssd") ).toString();
testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0);
operationsPerSecond = getOption(options, LiteralStringRef("operationsPerSecond"), 100e3);
commitFraction = getOption(options, LiteralStringRef("commitFraction"), .001);
setFraction = getOption(options, LiteralStringRef("setFraction"), .1);
nodeCount = getOption(options, LiteralStringRef("nodeCount"), 100000);
keyBytes = getOption(options, LiteralStringRef("keyBytes"), 8);
valueBytes = getOption(options, LiteralStringRef("valueBytes"), 8);
doSetup = getOption(options, LiteralStringRef("setup"), false);
doClear = getOption(options, LiteralStringRef("clear"), false);
doCount = getOption(options, LiteralStringRef("count"), false);
filename = getOption(options, LiteralStringRef("filename"), Value()).toString();
saturation = getOption(options, LiteralStringRef("saturation"), false);
storeType = getOption(options, LiteralStringRef("storeType"), LiteralStringRef("ssd")).toString();
}
virtual std::string description() { return "KVStoreTest"; }
virtual Future<Void> setup( Database const& cx ) { return Void(); }
virtual Future<Void> start( Database const& cx ) {
if (enabled)
return testKVStore(this);
virtual Future<Void> setup(Database const& cx) { return Void(); }
virtual Future<Void> start(Database const& cx) {
if (enabled) return testKVStore(this);
return Void();
}
virtual Future<bool> check( Database const& cx ) { return true; }
void metricsFromHistogram(vector<PerfMetric>& m, std::string name, Histogram<float>& h){
m.push_back( PerfMetric( "Min " + name, 1000.0 * h.min(), true) );
m.push_back( PerfMetric( "Average " + name, 1000.0 * h.mean(), true) );
m.push_back( PerfMetric( "Median " + name, 1000.0 * h.medianEstimate(), true) );
m.push_back( PerfMetric( "95%% " + name, 1000.0 * h.percentileEstimate(0.95), true) );
m.push_back( PerfMetric( "Max " + name, 1000.0 * h.max(), true) );
virtual Future<bool> check(Database const& cx) { return true; }
void metricsFromHistogram(vector<PerfMetric>& m, std::string name, Histogram<float>& h) {
m.push_back(PerfMetric("Min " + name, 1000.0 * h.min(), true));
m.push_back(PerfMetric("Average " + name, 1000.0 * h.mean(), true));
m.push_back(PerfMetric("Median " + name, 1000.0 * h.medianEstimate(), true));
m.push_back(PerfMetric("95%% " + name, 1000.0 * h.percentileEstimate(0.95), true));
m.push_back(PerfMetric("Max " + name, 1000.0 * h.max(), true));
}
virtual void getMetrics( vector<PerfMetric>& m ) {
if (setupTook) m.push_back( PerfMetric("SetupTook", setupTook, false) );
virtual void getMetrics(vector<PerfMetric>& m) {
if (setupTook) m.push_back(PerfMetric("SetupTook", setupTook, false));

m.push_back(reads.getMetric());
m.push_back(sets.getMetric());
@@ -258,26 +248,25 @@ ACTOR Future<Void> testKVStoreMain( KVStoreTestWorkload* workload, KVTest* ptest
state std::deque<Future<Void>> reads;
state BinaryWriter wr(Unversioned());
state int64_t commitsStarted = 0;
//test.store = makeDummyKeyValueStore();
// test.store = makeDummyKeyValueStore();
state int extraBytes = workload->valueBytes - sizeof(test.lastSet);
state int i;
ASSERT( extraBytes >= 0 );
ASSERT(extraBytes >= 0);
state char* extraValue = new char[extraBytes];
memset(extraValue, '.', extraBytes);

wait(delay(10));

if (workload->doCount) {
state int64_t count = 0;
state Key k;
state double cst = timer();
while (true) {
Standalone<VectorRef<KeyValueRef>> kv = wait( test.store->readRange( KeyRangeRef(k, LiteralStringRef("\xff\xff\xff\xff")), 1000 ) );
Standalone<VectorRef<KeyValueRef>> kv =
wait(test.store->readRange(KeyRangeRef(k, LiteralStringRef("\xff\xff\xff\xff")), 1000));
count += kv.size();
if (kv.size() < 1000) break;
k = keyAfter( kv[ kv.size()-1 ].key );
k = keyAfter(kv[kv.size() - 1].key);
}
double elapsed = timer()-cst;
double elapsed = timer() - cst;
TraceEvent("KVStoreCount").detail("Count", count).detail("Took", elapsed);
printf("Counted: %" PRId64 " in %0.1fs\n", count, elapsed);
}
@@ -285,107 +274,81 @@ ACTOR Future<Void> testKVStoreMain( KVStoreTestWorkload* workload, KVTest* ptest
if (workload->doSetup) {
wr << Version(0);
wr.serializeBytes(extraValue, extraBytes);

printf("Building %d nodes: ", workload->nodeCount);
state double setupBegin = timer();
state Future<Void> lastCommit = Void();
for(i=0; i<workload->nodeCount; i++) {
test.store->set( KeyValueRef( test.makeKey( i ), test.makeKey( i )) ) ;
if (!((i+1) % 10000) || i+1==workload->nodeCount) {
for (i = 0; i < workload->nodeCount; i++) {
test.store->set(KeyValueRef(test.makeKey(i), wr.toValue()));
if (!((i + 1) % 10000) || i + 1 == workload->nodeCount) {
wait(lastCommit);
lastCommit = test.store->commit();
printf("ETA: %f seconds\n", (timer()-setupBegin) / i * (workload->nodeCount-i));
}
}
wait( lastCommit );

for(i=0; i<workload->nodeCount; i++) {
test.store->set( KeyValueRef( test.makeKey( i ), test.makeKey( i* 10)) ) ;
if (!((i+1) % 10000) || i+1==workload->nodeCount) {
wait(lastCommit);
lastCommit = test.store->commit();
printf("ETA: %f seconds\n", (timer()-setupBegin) / i * (workload->nodeCount-i));
}
}
wait( lastCommit );

for(i=0; i<workload->nodeCount; i++) {
Optional<Value> result = wait(test.store->readValue(test.makeKey( i )));
if(result.present()){
ASSERT(result.get() == test.makeKey( i* 10));
} else {
ASSERT (false);
}
}
workload->setupTook = timer()-setupBegin;
TraceEvent("KVStoreSetup").detail("Count", workload->nodeCount).detail("Took", workload->setupTook).detail("AvailableMemory", test.store->getStorageBytes().free);
printf("ETA: %f seconds\n", (timer() - setupBegin) / i * (workload->nodeCount - i));
}
}
wait(lastCommit);
workload->setupTook = timer() - setupBegin;
TraceEvent("KVStoreSetup").detail("Count", workload->nodeCount).detail("Took", workload->setupTook);
}

state double t = now();
// state double stopAt = t + workload->testDuration;
// if (workload->saturation) {
// if (workload->commitFraction) {
// while (now() < stopAt) {
// for(int s=0; s<1/workload->commitFraction; s++)
// {
// ++test.lastSet;
// BinaryWriter wr(Unversioned()); wr << test.lastSet;
// wr.serializeBytes(extraValue, extraBytes);
// test.set( KeyValueRef( test.randomKey(), wr.toStringRef() ) );
// ++workload->sets;
// }
// ++commitsStarted;
// Void _ = wait( testKVCommit( &test, &workload->commitLatency, &workload->commits ) );
// }
// } else {
// vector<Future<Void>> actors;
// for(int a=0; a<100; a++)
// actors.push_back( testKVReadSaturation( &test, &workload->readLatency, &workload->reads ) );
// Void _ = wait( timeout( waitForAll(actors), workload->testDuration, Void() ) );
// }
// } else {
// while (t < stopAt) {
// double end = now();
// loop {
// t += 1.0 / workload->operationsPerSecond;
// double op = g_random->random01();
// if (op < workload->commitFraction) {
// // Commit
// if (workload->commits.getValue() == commitsStarted) {
// ++commitsStarted;
// ac.add( testKVCommit( &test, &workload->commitLatency, &workload->commits ) );
// }
// } else if (op < workload->commitFraction+workload->setFraction) {
// // Set
// ++test.lastSet;
// BinaryWriter wr(Unversioned()); wr << test.lastSet;
// wr.serializeBytes(extraValue, extraBytes);
// test.set( KeyValueRef( test.randomKey(), wr.toStringRef() ) );
// ++workload->sets;
// } else {
// // Read
// ac.add( testKVRead( &test, test.randomKey(), &workload->readLatency, &workload->reads ) );
// }
// if (t >= end) break;
// }
// Void _ = wait( delayUntil(t) );
// }
// }
state double stopAt = t + workload->testDuration;
if (workload->saturation) {
if (workload->commitFraction) {
while (now() < stopAt) {
for (int s = 0; s < 1 / workload->commitFraction; s++) {
++test.lastSet;
BinaryWriter wr(Unversioned());
wr << test.lastSet;
wr.serializeBytes(extraValue, extraBytes);
test.set(KeyValueRef(test.randomKey(), wr.toValue()));
++workload->sets;
}
++commitsStarted;
wait(testKVCommit(&test, &workload->commitLatency, &workload->commits));
}
} else {
vector<Future<Void>> actors;
for (int a = 0; a < 100; a++)
actors.push_back(testKVReadSaturation(&test, &workload->readLatency, &workload->reads));
wait(timeout(waitForAll(actors), workload->testDuration, Void()));
}
} else {
while (t < stopAt) {
double end = now();
loop {
t += 1.0 / workload->operationsPerSecond;
double op = deterministicRandom()->random01();
if (op < workload->commitFraction) {
// Commit
if (workload->commits.getValue() == commitsStarted) {
++commitsStarted;
ac.add(testKVCommit(&test, &workload->commitLatency, &workload->commits));
}
} else if (op < workload->commitFraction + workload->setFraction) {
// Set
++test.lastSet;
BinaryWriter wr(Unversioned());
wr << test.lastSet;
wr.serializeBytes(extraValue, extraBytes);
test.set(KeyValueRef(test.randomKey(), wr.toValue()));
++workload->sets;
} else {
// Read
ac.add(testKVRead(&test, test.randomKey(), &workload->readLatency, &workload->reads));
}
if (t >= end) break;
}
wait(delayUntil(t));
}
}

if (workload->doClear) {
state int chunk = 50;
state int chunk = 1000000;
t = timer();
for (i = 0; i < workload->nodeCount - chunk; i += chunk) {
test.store->clear( KeyRangeRef( test.makeKey( i ), test.makeKey( i + chunk ) ) );
wait( test.store->commit() );
}

for (i = workload->nodeCount - 50; i < workload->nodeCount; i++) {
Optional<Value> result = wait(test.store->readValue(test.makeKey(i)));
if (result.present()) {
ASSERT(result.get() == test.makeKey(i * 10));
} else {
ASSERT(false);
}
for (i = 0; i < workload->nodeCount; i += chunk) {
test.store->clear(KeyRangeRef(test.makeKey(i), test.makeKey(i + chunk)));
wait(test.store->commit());
}
TraceEvent("KVStoreClear").detail("Took", timer() - t);
}
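Reviewer note (not part of the diff): the restored non-saturation loop above paces work with a virtual clock; it schedules one operation per 1.0 / operationsPerSecond seconds, issues operations until the schedule catches up with now(), then sleeps via delayUntil(). A stripped-down sketch of just that pacing skeleton (pacedLoop() is illustrative):

    ACTOR Future<Void> pacedLoop(double opsPerSecond, double duration) {
        state double t = now();
        state double stopAt = t + duration;
        while (t < stopAt) {
            double end = now();
            loop {
                t += 1.0 / opsPerSecond;  // advance the virtual clock by one operation
                // ... start one read, set, or commit here ...
                if (t >= end) break;      // virtual clock has caught up with real time
            }
            wait(delayUntil(t));          // sleep until the next scheduled operation is due
        }
        return Void();
    }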
@@ -394,36 +357,36 @@ ACTOR Future<Void> testKVStoreMain( KVStoreTestWorkload* workload, KVTest* ptest
}

ACTOR Future<Void> testKVStore(KVStoreTestWorkload* workload) {
state KVTest test( workload->nodeCount, !workload->filename.size(), workload->keyBytes );
state KVTest test(workload->nodeCount, !workload->filename.size(), workload->keyBytes);
state Error err;

//wait( delay(1) );
// wait( delay(1) );
TraceEvent("GO");

UID id = deterministicRandom()->randomUniqueID();
std::string fn = workload->filename.size() ? workload->filename : id.toString();
if (workload->storeType == "ssd")
test.store = keyValueStoreSQLite( fn, id, KeyValueStoreType::SSD_BTREE_V2);
test.store = keyValueStoreSQLite(fn, id, KeyValueStoreType::SSD_BTREE_V2);
else if (workload->storeType == "ssd-1")
test.store = keyValueStoreSQLite( fn, id, KeyValueStoreType::SSD_BTREE_V1);
test.store = keyValueStoreSQLite(fn, id, KeyValueStoreType::SSD_BTREE_V1);
else if (workload->storeType == "ssd-2")
test.store = keyValueStoreSQLite( fn, id, KeyValueStoreType::SSD_REDWOOD_V1);
test.store = keyValueStoreSQLite(fn, id, KeyValueStoreType::SSD_REDWOOD_V1);
else if (workload->storeType == "ssd-redwood-experimental")
test.store = keyValueStoreRedwoodV1( fn, id );
test.store = keyValueStoreRedwoodV1(fn, id);
else if (workload->storeType == "memory")
test.store = keyValueStoreMemory( fn, id, 500e6 );
else if (workload->storeType == "memory-radixtree")
test.store = keyValueStoreMemory(fn, id, 500e6);
else if (workload->storeType == "memory-radixtree")
test.store = keyValueStoreMemory(fn, id, 500e6, "fdr", KeyValueStoreType::MEMORY_RADIXTREE);
else
ASSERT(false);

wait(test.store->init());

state Future<Void> main = testKVStoreMain( workload, &test );
state Future<Void> main = testKVStoreMain(workload, &test);
try {
choose {
when ( wait( main ) ) { }
when ( wait( test.store->getError() ) ) { ASSERT( false ); }
when(wait(main)) {}
when(wait(test.store->getError())) { ASSERT(false); }
}
} catch (Error& e) {
err = e;
@@ -432,7 +395,7 @@ ACTOR Future<Void> testKVStore(KVStoreTestWorkload* workload) {

Future<Void> c = test.store->onClosed();
test.close();
wait( c );
wait(c);
if (err.code() != invalid_error_code) throw err;
return Void();
}
}
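Reviewer note (not part of the diff): the two hunks above form one shutdown pattern: race the test body against the store's error future, capture anything thrown, close the store, wait for onClosed(), then rethrow. A hedged sketch of that shape with the test-specific pieces removed (runThenClose() is hypothetical; a default-constructed Error carries invalid_error_code):

    ACTOR Future<Void> runThenClose(IKeyValueStore* store, Future<Void> work) {
        state Error err;
        try {
            choose {
                when(wait(work)) {}                              // test finished first
                when(wait(store->getError())) { ASSERT(false); } // store failed first
            }
        } catch (Error& e) {
            err = e;
        }
        Future<Void> closed = store->onClosed();
        store->close(); // the test calls dispose() instead when it created a throwaway file
        wait(closed);
        if (err.code() != invalid_error_code) throw err;
        return Void();
    }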
@@ -295,7 +295,8 @@ public:
iterator find(const StringRef& key);
iterator begin();
iterator end();
// modifications
iterator previous(iterator i);
// modifications
std::pair<iterator, bool> insert(const StringRef& key, const StringRef& val, bool replaceExisting = true);
int insert(const std::vector<std::pair<KeyValueMapPair, uint64_t>>& pairs, bool replaceExisting = true) {
// dummy method interface(to keep every interface same as IndexedSet )
@@ -309,7 +310,6 @@ public:
iterator upper_bound(const StringRef& key);
// access
uint64_t sumTo(iterator to);
iterator previous (iterator i);

private:
size_type m_size;
@@ -563,7 +563,7 @@ const radix_tree::iterator& radix_tree::iterator::operator--() {
}

/*
* reconstruct the key, using @param arena to allocate memory
* reconstruct the key
*/
StringRef radix_tree::iterator::getKey(uint8_t* content, int len) const {
if (m_pointee == NULL) return StringRef();