bcache: Revert "bcache: shrink btree node cache after bch_btree_check()"
This reverts commit 1df3877ff6
. In my testing, sometimes even when all the cached btree nodes are freed, creating gc and allocator kernel threads may still fail. Finally it turns out that kthread_run() may fail if there is a pending signal for the current task. And the pending signal is sent from the OOM killer, which is triggered by memory consumption in bch_btree_check(). Therefore explicitly shrinking the bcache btree node cache here does not help, and after the shrinker callback is improved, as well as pending signals being ignored before creating kernel threads, such an operation is unnecessary anymore. This patch reverts commit 1df3877ff6
("bcache: shrink btree node cache after bch_btree_check()") because we have better improvement now. Signed-off-by: Coly Li <colyli@suse.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
0b96da639a
commit
309cc719a2
|
@ -1917,23 +1917,6 @@ static int run_cache_set(struct cache_set *c)
|
|||
if (bch_btree_check(c))
|
||||
goto err;
|
||||
|
||||
/*
|
||||
* bch_btree_check() may occupy too much system memory which
|
||||
* has negative effects to user space application (e.g. data
|
||||
* base) performance. Shrink the mca cache memory proactively
|
||||
* here to avoid competing memory with user space workloads..
|
||||
*/
|
||||
if (!c->shrinker_disabled) {
|
||||
struct shrink_control sc;
|
||||
|
||||
sc.gfp_mask = GFP_KERNEL;
|
||||
sc.nr_to_scan = c->btree_cache_used * c->btree_pages;
|
||||
/* first run to clear b->accessed tag */
|
||||
c->shrink.scan_objects(&c->shrink, &sc);
|
||||
/* second run to reap non-accessed nodes */
|
||||
c->shrink.scan_objects(&c->shrink, &sc);
|
||||
}
|
||||
|
||||
bch_journal_mark(c, &journal);
|
||||
bch_initial_gc_finish(c);
|
||||
pr_debug("btree_check() done");
|
||||
|
|
Loading…
Reference in New Issue