Merge pull request #817 from apple/release-6.0

Merge release-6.0 into master
This commit is contained in:
A.J. Beamon 2018-10-05 15:28:41 -07:00 committed by GitHub
commit 57ce1ad7c3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 42 additions and 19 deletions

View File

@@ -151,6 +151,7 @@ Future<Void> AsyncFileCached::truncate( int64_t size ) {
++countCacheWrites;
std::vector<Future<Void>> actors;
int64_t oldLength = length;
int offsetInPage = size % pageCache->pageSize;
int64_t pageOffset = size - offsetInPage;
@@ -176,25 +177,45 @@ Future<Void> AsyncFileCached::truncate( int64_t size ) {
pageOffset += pageCache->pageSize;
}
/*
for ( auto p = pages.lower_bound( pageOffset ); p != pages.end(); p = pages.erase(p) ) {
auto f = p->second->truncate();
if ( !f.isReady() || f.isError())
actors.push_back( f );
}
*/
for ( auto p = pages.begin(); p != pages.end(); ) {
if ( p->first >= pageOffset ) {
auto f = p->second->truncate();
if ( !f.isReady() || f.isError() )
actors.push_back( f );
auto last = p;
++p;
pages.erase(last);
} else
++p;
}
// if this call to truncate results in a larger file, there is no
// need to erase any pages
if(oldLength > pageOffset) {
// Iterating through all pages results in better cache locality than
// looking up pages one by one in the hash table. However, if we only need
// to truncate a small portion of data, looking up pages one by one should
// be faster. So for now we do single key lookup for each page if it results
// in less than a fixed percentage of the unordered map being accessed.
int64_t numLookups = (oldLength + (pageCache->pageSize-1) - pageOffset) / pageCache->pageSize;
if(numLookups < pages.size() * FLOW_KNOBS->PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION) {
for(int64_t offset = pageOffset; offset < oldLength; offset += pageCache->pageSize) {
auto iter = pages.find(offset);
if(iter != pages.end()) {
auto f = iter->second->truncate();
if(!f.isReady() || f.isError()) {
actors.push_back(f);
}
pages.erase(iter);
}
}
}
else {
for(auto p = pages.begin(); p != pages.end();) {
if(p->first >= pageOffset) {
auto f = p->second->truncate();
if(!f.isReady() || f.isError()) {
actors.push_back(f);
}
auto last = p;
++p;
pages.erase(last);
}
else {
++p;
}
}
}
}
return truncate_impl( this, size, waitForAll( actors ) );
}

View File

@@ -71,6 +71,7 @@ FlowKnobs::FlowKnobs(bool randomize, bool isSimulated) {
init( BUGGIFY_SIM_PAGE_CACHE_4K, 1e6 );
init( BUGGIFY_SIM_PAGE_CACHE_64K, 1e6 );
init( MAX_EVICT_ATTEMPTS, 100 ); if( randomize && BUGGIFY ) MAX_EVICT_ATTEMPTS = 2;
init( PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION, 0.1 ); if( randomize && BUGGIFY ) PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION = 0.0; else if( randomize && BUGGIFY ) PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION = 1.0;
//AsyncFileKAIO
init( MAX_OUTSTANDING, 64 );

View File

@@ -91,6 +91,7 @@ public:
int64_t BUGGIFY_SIM_PAGE_CACHE_4K;
int64_t BUGGIFY_SIM_PAGE_CACHE_64K;
int MAX_EVICT_ATTEMPTS;
double PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION;
//AsyncFileKAIO
int MAX_OUTSTANDING;