check the throughput without keeping the size

Fuheng Zhao 2021-09-03 10:24:21 -07:00
parent 3bb9ba0687
commit 82929fe40e
2 changed files with 46 additions and 45 deletions


@@ -105,6 +105,8 @@ public:
+ // Usable size, without checksum
int size() const { return logicalSize - sizeof(Checksum); }
+ int totalSize() const { return logicalSize; }
Standalone<StringRef> asStringRef() const { return Standalone<StringRef>(StringRef(begin(), size()), arena); }
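The new totalSize() accessor exposes the page's full logical size, while size() still excludes the trailing checksum; the byteSize() hook added at the end of this commit is built on it. A minimal standalone sketch of that relationship, with assumed stand-in names (PageSketch, a 4-byte checksum), not the FDB types:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for ArenaPage: logicalSize covers payload plus checksum.
struct PageSketch {
    using Checksum = uint32_t; // assumed 4-byte checksum width
    int logicalSize;
    int size() const { return logicalSize - sizeof(Checksum); } // usable payload
    int totalSize() const { return logicalSize; }               // payload + checksum
};

int main() {
    PageSketch p{ 4096 };
    std::printf("usable=%d total=%d\n", p.size(), p.totalSize()); // usable=4092 total=4096
    return 0;
}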


@@ -1818,11 +1818,11 @@ template <class IndexType, class ObjectType>
class ObjectCache : NonCopyable {
struct Entry : public boost::intrusive::list_base_hook<> {
- Entry() : hits(0), size(0) {}
+ Entry() : hits(0) /*size(0)*/ {}
IndexType index;
ObjectType item;
int hits;
- int size;
+ /*int size*/;
};
typedef std::unordered_map<IndexType, Entry> CacheT;
@@ -1831,10 +1831,10 @@ class ObjectCache : NonCopyable {
public:
ObjectCache(int sizeLimit = 1) : sizeLimit(sizeLimit), currentSize(0) {}
- void setSizeLimit(int n) {
+ void setSizeLimit(int64_t n, int buckets) {
ASSERT(n > 0);
sizeLimit = n;
- cache.reserve(n);
+ cache.reserve(buckets);
}
// Get the object for i if it exists, else return nullptr.
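Since the limit is now a byte budget rather than an entry count, it can no longer double as the hash-table reservation, so a separate bucket count is passed alongside it. A tiny sketch of the new signature's intent, with assumed stand-in names:

#include <cassert>
#include <cstdint>
#include <unordered_map>

// Hypothetical skeleton mirroring the two-argument setSizeLimit().
struct CacheSketch {
    int64_t sizeLimit = 1;              // now interpreted as bytes
    std::unordered_map<int, int> cache; // stand-in for CacheT

    void setSizeLimit(int64_t n, int buckets) {
        assert(n > 0);
        sizeLimit = n;          // byte budget enforced at eviction time
        cache.reserve(buckets); // std::unordered_map::reserve takes an element count
    }
};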
@@ -1869,7 +1869,7 @@ public:
if (toEvict.hits == 0) {
++g_redwoodMetrics.metric.pagerEvictUnhit;
}
- currentSize -= toEvict.size;
+ currentSize -= toEvict.item.byteSize();
evictionOrder.erase(evictionOrder.iterator_to(toEvict));
cache.erase(i);
return true;
@@ -1879,7 +1879,7 @@ public:
// After a get(), the object for i is the last in evictionOrder.
// If noHit is set, do not consider this access to be cache hit if the object is present
// If noMiss is set, do not consider this access to be a cache miss if the object is not present
- ObjectType& get(const IndexType& index, int size = 1, bool noHit = false) {
+ ObjectType& get(const IndexType& index, int size, bool noHit = false) {
Entry& entry = cache[index];
// If entry is linked into evictionOrder then move it to the back of the order
@@ -1895,7 +1895,7 @@ public:
// Finish initializing entry
entry.index = index;
entry.hits = 0;
- entry.size = size;
+ /*entry.size = size;*/
currentSize += size;
// Insert the newly created Entry at the back of the eviction order
evictionOrder.push_back(entry);
@@ -1928,7 +1928,7 @@ public:
if (toEvict.hits == 0) {
++g_redwoodMetrics.metric.pagerEvictUnhit;
}
- currentSize -= toEvict.size;
+ currentSize -= toEvict.item.byteSize();
debug_printf(
"Evicting %s to make room for %s\n", toString(toEvict.index).c_str(), toString(index).c_str());
evictionOrder.pop_front();
@@ -1951,8 +1951,8 @@ public:
// structures so we know for sure that no page will become unevictable
// after it is either evictable or onEvictable() is ready.
cache.swap(self->cache);
- evictionOrder.swap(self->evictionOrder);
currentSize = self->currentSize;
+ evictionOrder.swap(self->evictionOrder);
state typename EvictionOrderT::iterator i = evictionOrder.begin();
state typename EvictionOrderT::iterator iEnd = evictionOrder.end();
@@ -1960,14 +1960,15 @@ public:
if (!i->item.evictable()) {
wait(i->item.onEvictable());
}
- currentSize -= i->size;
- self->currentSize -= i->size;
+ //currentSize -= i->item.byteSize();
+ //self->currentSize -= i->item.byteSize();
++i;
}
evictionOrder.clear();
cache.clear();
- ASSERT(currentSize == 0);
+ //ASSERT(currentSize == 0);
+ self->currentSize -= currentSize;
return Void();
}
@@ -1976,7 +1977,7 @@ public:
return clear_impl(this);
}
- int count() const { return currentSize; }
+ int count() const { return evictionOrder.size(); }
private:
int64_t sizeLimit;
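Taken together, the ObjectCache changes drop the per-entry size field: the cache now charges a caller-supplied byte estimate on insert and uncharges the item's actual byteSize() on eviction, and count() reports entries rather than bytes. A self-contained sketch of that accounting under assumed names (ByteLimitedCache, Page); the real class uses a boost intrusive list for O(1) LRU reordering, omitted here:

#include <cstdint>
#include <list>
#include <string>
#include <unordered_map>

// Hypothetical cached object exposing byteSize(), like the pager's page entries.
struct Page {
    std::string data;
    int byteSize() const { return (int)data.size(); }
};

template <class Index, class Object>
class ByteLimitedCache {
    struct Entry {
        Index index{};
        Object item;
        int hits = 0; // no size member, as in this commit
    };
    int64_t sizeLimit;
    int64_t currentSize = 0; // resident bytes, not an entry count
    std::unordered_map<Index, Entry> cache;
    std::list<Index> evictionOrder; // front = oldest

public:
    explicit ByteLimitedCache(int64_t limitBytes) : sizeLimit(limitBytes) {}

    // sizeHint replaces the old default of 1: callers now pass bytes.
    Object& get(const Index& index, int sizeHint) {
        auto [it, inserted] = cache.try_emplace(index);
        Entry& entry = it->second;
        if (!inserted) {
            ++entry.hits; // resident: a cache hit
            return entry.item;
        }
        entry.index = index;
        currentSize += sizeHint; // charge the caller's byte estimate
        evictionOrder.push_back(index);
        // Evict oldest entries until the byte budget is respected again.
        while (currentSize > sizeLimit && evictionOrder.front() != index) {
            Index victim = evictionOrder.front();
            evictionOrder.pop_front();
            currentSize -= cache.at(victim).item.byteSize(); // uncharge actual bytes
            cache.erase(victim);
        }
        return entry.item;
    }

    size_t count() const { return evictionOrder.size(); } // entries, like the new count()
};

int main() {
    ByteLimitedCache<int, Page> cache(8); // 8-byte budget
    cache.get(1, 4).data = "aaaa";
    cache.get(2, 4).data = "bbbb";        // at the limit
    cache.get(3, 4);                      // over budget: evicts key 1
    return 0;
}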
@@ -2114,7 +2115,8 @@ public:
if (pHeader != nullptr) {
pHeader->pageSize = logicalPageSize;
}
- pageCache.setSizeLimit(1 + ((pageCacheBytes - 1) / physicalPageSize));
+ //pageCache.setSizeLimit(1 + ((pageCacheBytes - 1) / physicalPageSize));
+ pageCache.setSizeLimit(pageCacheBytes, 1 + ((pageCacheBytes - 1) / physicalPageSize));
}
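The first argument is now the byte budget itself; the old expression is kept as the second argument, the bucket count. 1 + ((n - 1) / d) is the standard ceiling-division idiom, i.e. the maximum number of physical pages the budget can hold. A worked example with assumed values:

#include <cstdint>
#include <cstdio>

int main() {
    int64_t pageCacheBytes = int64_t(200) << 20; // assumed 200 MiB budget
    int64_t physicalPageSize = 4096;
    // 1 + ((n - 1) / d) == ceil(n / d) for positive integers
    int64_t buckets = 1 + ((pageCacheBytes - 1) / physicalPageSize);
    std::printf("buckets=%lld\n", (long long)buckets); // prints buckets=51200
    return 0;
}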
void setExtentSize(int size) {
@@ -2132,7 +2134,7 @@ public:
}
// TODO: How should this cache be sized - not really a cache. it should hold all extentIDs?
- extentCache.setSizeLimit(100000);
+ extentCache.setSizeLimit(100000 * physicalPageSize, 100000);
}
void updateCommittedHeader() {
@@ -2587,7 +2589,7 @@ public:
// or as a cache miss because there is no benefit to the page already being in cache
// Similarly, this does not count as a point lookup for reason.
ASSERT(pageIDs.front() != invalidLogicalPageID);
- PageCacheEntry& cacheEntry = pageCache.get(pageIDs.front(), pageIDs.size(), true);
+ PageCacheEntry& cacheEntry = pageCache.get(pageIDs.front(), pageIDs.size() * logicalPageSize, true);
debug_printf("DWALPager(%s) op=write %s cached=%d reading=%d writing=%d\n",
filename.c_str(),
toString(pageIDs).c_str(),
@@ -2815,17 +2817,14 @@ public:
ACTOR static Future<Reference<ArenaPage>> readPhysicalMultiPage(DWALPager* self,
Standalone<VectorRef<PhysicalPageID>> pageIDs,
- int priority,
- bool header) {
+ int priority) {
ASSERT(!self->memoryOnly);
// if (g_network->getCurrentTask() > TaskPriority::DiskRead) {
// wait(delay(0, TaskPriority::DiskRead));
// }
- state Reference<ArenaPage> page =
- header ? Reference<ArenaPage>(new ArenaPage(smallestPhysicalBlock, smallestPhysicalBlock))
- : self->newPageBuffer(pageIDs.size());
+ state Reference<ArenaPage> page = self->newPageBuffer(pageIDs.size());
debug_printf("DWALPager(%s) op=readPhysicalStart %s ptr=%p\n",
self->filename.c_str(),
toString(pageIDs).c_str(),
@@ -2834,7 +2833,7 @@ public:
state PriorityMultiLock::Lock lock = wait(self->ioLock.lock(std::min(priority, ioMaxPriority)));
++g_redwoodMetrics.metric.pagerDiskRead;
// TODO: Could a dispatched read try to write to page after it has been destroyed if this actor is cancelled?
- state int blockSize = header ? smallestPhysicalBlock : self->physicalPageSize;
+ state int blockSize = self->physicalPageSize;
state uint8_t* data = page->mutate();
std::vector<Future<int>> reads;
for (int i = 0; i < pageIDs.size(); ++i) {
@@ -2850,22 +2849,20 @@ public:
page->begin(),
pageIDs.size() * blockSize);
- // Header reads are checked explicitly during recovery
- if (!header) {
- if (!page->verifyChecksum(pageIDs.front())) {
- debug_printf(
- "DWALPager(%s) checksum failed for %s\n", self->filename.c_str(), toString(pageIDs).c_str());
- Error e = checksum_failed();
- TraceEvent(SevError, "RedwoodChecksumFailed")
- .detail("Filename", self->filename.c_str())
- .detail("PageID", pageIDs)
- .detail("PageSize", self->physicalPageSize)
- .detail("Offset", pageIDs.front() * self->physicalPageSize)
- .detail("CalculatedChecksum", page->calculateChecksum(pageIDs.front()))
- .detail("ChecksumInPage", page->getChecksum())
- .error(e);
- ASSERT(false);
- throw e;
- }
+ if (!page->verifyChecksum(pageIDs.front())) {
+ debug_printf(
+ "DWALPager(%s) checksum failed for %s\n", self->filename.c_str(), toString(pageIDs).c_str());
+ Error e = checksum_failed();
+ TraceEvent(SevError, "RedwoodChecksumFailed")
+ .detail("Filename", self->filename.c_str())
+ .detail("PageID", pageIDs)
+ .detail("PageSize", self->physicalPageSize)
+ .detail("Offset", pageIDs.front() * self->physicalPageSize)
+ .detail("CalculatedChecksum", page->calculateChecksum(pageIDs.front()))
+ .detail("ChecksumInPage", page->getChecksum())
+ .error(e);
+ ASSERT(false);
+ throw e;
+ }
return page;
}
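With the header flag gone, readPhysicalMultiPage always allocates with newPageBuffer, always uses the full physical page size as its block size, and verifies the checksum unconditionally (header pages are still checked during recovery, as the removed comment noted). A minimal sketch of that verify-or-fail shape; the checksum function is a toy stand-in, not Redwood's:

#include <cstdint>
#include <stdexcept>
#include <vector>

// Hypothetical page image: payload bytes plus a stored checksum.
struct PageSketch {
    std::vector<uint8_t> data;
    uint32_t storedChecksum = 0;

    uint32_t calculateChecksum(uint32_t seed) const {
        uint32_t h = seed; // seeded by the first page ID; toy hash, assumption only
        for (uint8_t b : data)
            h = h * 131u + b;
        return h;
    }
    bool verifyChecksum(uint32_t seed) const { return calculateChecksum(seed) == storedChecksum; }
};

PageSketch readAndVerify(PageSketch page, uint32_t firstPageID) {
    if (!page.verifyChecksum(firstPageID)) {
        // the real code also emits a SevError trace with offsets and both checksums
        throw std::runtime_error("checksum_failed");
    }
    return page;
}

int main() {
    PageSketch p{ { 1, 2, 3 }, 0 };
    p.storedChecksum = p.calculateChecksum(7); // stamp a valid checksum
    readAndVerify(p, 7);                       // passes; a wrong page ID would throw
    return 0;
}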
@@ -2905,8 +2902,7 @@ public:
debug_printf("DWALPager(%s) op=readUncachedMiss %s\n", filename.c_str(), toString(pageID).c_str());
return forwardError(readPhysicalPage(this, pageID, priority, false), errorPromise);
}
- PageCacheEntry& cacheEntry = pageCache.get(pageID, 1, noHit);
+ PageCacheEntry& cacheEntry = pageCache.get(pageID, logicalPageSize, noHit);
debug_printf("DWALPager(%s) op=read %s cached=%d reading=%d writing=%d noHit=%d\n",
filename.c_str(),
toString(pageID).c_str(),
@@ -2949,10 +2945,10 @@ public:
}
++g_redwoodMetrics.metric.pagerProbeMiss;
debug_printf("DWALPager(%s) op=readUncachedMiss %s\n", filename.c_str(), toString(pageIDs).c_str());
- return forwardError(readPhysicalMultiPage(this, pageIDs, priority, false), errorPromise);
+ return forwardError(readPhysicalMultiPage(this, pageIDs, priority), errorPromise);
}
- PageCacheEntry& cacheEntry = pageCache.get(pageIDs.front(), pageIDs.size(), noHit);
+ PageCacheEntry& cacheEntry = pageCache.get(pageIDs.front(), pageIDs.size() * logicalPageSize, noHit);
debug_printf("DWALPager(%s) op=read %s cached=%d reading=%d writing=%d noHit=%d\n",
filename.c_str(),
toString(pageIDs).c_str(),
@@ -2962,7 +2958,7 @@ public:
noHit);
if (!cacheEntry.initialized()) {
debug_printf("DWALPager(%s) issuing actual read of %s\n", filename.c_str(), toString(pageIDs).c_str());
- cacheEntry.readFuture = forwardError(readPhysicalMultiPage(this, pageIDs, priority, false), errorPromise);
+ cacheEntry.readFuture = forwardError(readPhysicalMultiPage(this, pageIDs, priority), errorPromise);
cacheEntry.writeFuture = Void();
++g_redwoodMetrics.metric.pagerCacheMiss;
@@ -3143,7 +3139,7 @@ public:
readSize = (tailPageID - pageID + 1) * physicalPageSize;
}
- PageCacheEntry& cacheEntry = extentCache.get(pageID);
+ PageCacheEntry& cacheEntry = extentCache.get(pageID, logicalPageSize);
if (!cacheEntry.initialized()) {
cacheEntry.writeFuture = Void();
cacheEntry.readFuture =
@@ -3549,7 +3545,8 @@ public:
int64_t total;
if (memoryOnly) {
total = pageCacheBytes;
- free = pageCacheBytes - ((int64_t)pageCache.count() * physicalPageSize);
+ //free = pageCacheBytes - ((int64_t)pageCache.count() * physicalPageSize);
+ free = pageCacheBytes - ((int64_t)pageCache.count());
} else {
g_network->getDiskBytes(parentDirectory(filename), free, total);
}
@@ -3668,6 +3665,8 @@ private:
return !reading() && !writing();
}
+ int byteSize() const { return readFuture.get()->totalSize(); }
Future<Void> onEvictable() const { return ready(readFuture) && writeFuture; }
};
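Finally, the hook the new byte accounting relies on: once its read future is ready, a cache entry reports the total on-disk size of the page it holds, checksum included. A sketch with stand-in types (not the FDB classes; shared_ptr plays the role of readFuture.get()):

#include <memory>

// Stand-in for ArenaPage from the first hunk above.
struct ArenaPageSketch {
    int logicalSize;
    int totalSize() const { return logicalSize; } // payload + checksum
};

// Stand-in for PageCacheEntry.
struct CacheEntrySketch {
    std::shared_ptr<ArenaPageSketch> page; // stands in for readFuture.get()
    int byteSize() const { return page->totalSize(); } // what eviction uncharges
};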