bcache: Convert writeback to a kthread

This simplifies the writeback flow control quite a bit: previously it was
conceptually two coroutines, refill_dirty() and read_dirty(), that handed
control back and forth through a private workqueue. Running both phases
sequentially in a dedicated kthread makes the code quite a bit more
straightforward.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
commit 5e6926daac
parent 72a44517f3
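The pattern the commit moves to is the stock kernel one: a dedicated thread
that parks itself in TASK_INTERRUPTIBLE when idle, is woken with
wake_up_process(), and exits when kthread_stop() is called. Below is a
minimal, self-contained sketch of that loop; the wb_* names are hypothetical
stand-ins (not bcache code), with the two phase functions playing the roles
of refill_dirty() and read_dirty():

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical per-device state, standing in for struct cached_dev. */
struct wb_state {
	struct task_struct	*thread;
	atomic_t		has_dirty;
};

static void wb_scan(struct wb_state *s)  { /* refill phase: find dirty keys */ }
static void wb_flush(struct wb_state *s) { /* drain phase: write them back */ }

static int wb_thread(void *arg)
{
	struct wb_state *s = arg;

	while (!kthread_should_stop()) {
		if (!atomic_read(&s->has_dirty)) {
			/*
			 * Park until wake_up_process().  The task state is
			 * set before the final stop check so a concurrent
			 * kthread_stop() or wakeup cannot be lost.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				return 0;
			try_to_freeze();
			schedule();
			continue;
		}

		wb_scan(s);	/* was: the refill_dirty() closure */
		wb_flush(s);	/* was: the read_dirty() closure */
	}

	return 0;
}

/* Mirrors the shape of the new bch_cached_dev_writeback_init(). */
static int wb_start(struct wb_state *s)
{
	s->thread = kthread_create(wb_thread, s, "wb_example");
	if (IS_ERR(s->thread))
		return PTR_ERR(s->thread);
	return 0;
}

The real thread in the diff additionally holds dc->writeback_lock around the
scan and rate-limits the drain; the sketch keeps only the control flow.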
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -509,7 +509,7 @@ struct cached_dev {
 	/* Limit number of writeback bios in flight */
 	struct semaphore	in_flight;
-	struct closure_with_timer writeback;
+	struct task_struct	*writeback_thread;
 
 	struct keybuf		writeback_keys;
 
@@ -1038,7 +1038,11 @@ static inline void bkey_init(struct bkey *k)
 #define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k))
 #define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY			KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
+
+#define MAX_KEY_INODE		(~(~0 << 20))
+#define MAX_KEY_OFFSET		(((uint64_t) ~0) >> 1)
+#define MAX_KEY			KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
 #define ZERO_KEY		KEY(0, 0, 0)
 
 /*
@@ -1214,8 +1218,6 @@ int bch_cache_allocator_init(struct cache *ca);
 
 void bch_debug_exit(void);
 int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
 void bch_request_exit(void);
 int bch_request_init(void);
 void bch_btree_exit(void);
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1029,6 +1029,7 @@ static void cached_dev_free(struct closure *cl)
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 
 	cancel_delayed_work_sync(&dc->writeback_rate_update);
+	kthread_stop(dc->writeback_thread);
 
 	mutex_lock(&bch_register_lock);
 
@@ -2006,7 +2007,6 @@ static struct notifier_block reboot = {
 static void bcache_exit(void)
 {
 	bch_debug_exit();
-	bch_writeback_exit();
 	bch_request_exit();
 	bch_btree_exit();
 	if (bcache_kobj)
@@ -2039,7 +2039,6 @@ static int __init bcache_init(void)
 	    sysfs_create_files(bcache_kobj, files) ||
 	    bch_btree_init() ||
 	    bch_request_init() ||
-	    bch_writeback_init() ||
 	    bch_debug_init(bcache_kobj))
 		goto err;
 
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -11,18 +11,11 @@
 #include "debug.h"
 #include "writeback.h"
 
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <trace/events/bcache.h>
 
-static struct workqueue_struct *dirty_wq;
-
-static void read_dirty(struct closure *);
-
-struct dirty_io {
-	struct closure		cl;
-	struct cached_dev	*dc;
-	struct bio		bio;
-};
-
 /* Rate limiting */
 
 static void __update_writeback_rate(struct cached_dev *dc)
@@ -72,9 +65,6 @@ out:
 	dc->writeback_rate_derivative = derivative;
 	dc->writeback_rate_change = change;
 	dc->writeback_rate_target = target;
-
-	schedule_delayed_work(&dc->writeback_rate_update,
-			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static void update_writeback_rate(struct work_struct *work)
@@ -90,6 +80,9 @@ static void update_writeback_rate(struct work_struct *work)
 	__update_writeback_rate(dc);
 
 	up_read(&dc->writeback_lock);
+
+	schedule_delayed_work(&dc->writeback_rate_update,
+			      dc->writeback_rate_update_seconds * HZ);
 }
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
@@ -105,37 +98,11 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 	return min_t(uint64_t, ret, HZ);
 }
 
-/* Background writeback */
-
-static bool dirty_pred(struct keybuf *buf, struct bkey *k)
-{
-	return KEY_DIRTY(k);
-}
-
-static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
-{
-	uint64_t stripe = KEY_START(k);
-	unsigned nr_sectors = KEY_SIZE(k);
-	struct cached_dev *dc = container_of(buf, struct cached_dev,
-					     writeback_keys);
-
-	if (!KEY_DIRTY(k))
-		return false;
-
-	do_div(stripe, dc->disk.stripe_size);
-
-	while (1) {
-		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
-		    dc->disk.stripe_size)
-			return true;
-
-		if (nr_sectors <= dc->disk.stripe_size)
-			return false;
-
-		nr_sectors -= dc->disk.stripe_size;
-		stripe++;
-	}
-}
+struct dirty_io {
+	struct closure		cl;
+	struct cached_dev	*dc;
+	struct bio		bio;
+};
 
 static void dirty_init(struct keybuf_key *w)
 {
@@ -153,132 +120,6 @@ static void dirty_init(struct keybuf_key *w)
 	bch_bio_map(bio, NULL);
 }
 
-static void refill_dirty(struct closure *cl)
-{
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	struct keybuf *buf = &dc->writeback_keys;
-	bool searched_from_start = false;
-	struct bkey end = MAX_KEY;
-	SET_KEY_INODE(&end, dc->disk.id);
-
-	if (!atomic_read(&dc->disk.detaching) &&
-	    !dc->writeback_running)
-		closure_return(cl);
-
-	down_write(&dc->writeback_lock);
-
-	if (!atomic_read(&dc->has_dirty)) {
-		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-		bch_write_bdev_super(dc, NULL);
-
-		up_write(&dc->writeback_lock);
-		closure_return(cl);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
-		buf->last_scanned = KEY(dc->disk.id, 0, 0);
-		searched_from_start = true;
-	}
-
-	if (dc->partial_stripes_expensive) {
-		uint64_t i;
-
-		for (i = 0; i < dc->disk.nr_stripes; i++)
-			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
-			    dc->disk.stripe_size)
-				goto full_stripes;
-
-		goto normal_refill;
-full_stripes:
-		searched_from_start = false;	/* not searching entire btree */
-		bch_refill_keybuf(dc->disk.c, buf, &end,
-				  dirty_full_stripe_pred);
-	} else {
-normal_refill:
-		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
-		/* Searched the entire btree - delay awhile */
-
-		if (RB_EMPTY_ROOT(&buf->keys)) {
-			atomic_set(&dc->has_dirty, 0);
-			cached_dev_put(dc);
-		}
-
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-	}
-
-	up_write(&dc->writeback_lock);
-
-	bch_ratelimit_reset(&dc->writeback_rate);
-
-	/* Punt to workqueue only so we don't recurse and blow the stack */
-	continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
-	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
-		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
-	}
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
-	if (!atomic_read(&dc->has_dirty) &&
-	    !atomic_xchg(&dc->has_dirty, 1)) {
-		atomic_inc(&dc->count);
-
-		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
-			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
-			/* XXX: should do this synchronously */
-			bch_write_bdev_super(dc, NULL);
-		}
-
-		bch_writeback_queue(dc);
-
-		if (dc->writeback_percent)
-			schedule_delayed_work(&dc->writeback_rate_update,
-					      dc->writeback_rate_update_seconds * HZ);
-	}
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
-				  uint64_t offset, int nr_sectors)
-{
-	struct bcache_device *d = c->devices[inode];
-	unsigned stripe_offset;
-	uint64_t stripe = offset;
-
-	if (!d)
-		return;
-
-	do_div(stripe, d->stripe_size);
-
-	stripe_offset = offset & (d->stripe_size - 1);
-
-	while (nr_sectors) {
-		int s = min_t(unsigned, abs(nr_sectors),
-			      d->stripe_size - stripe_offset);
-
-		if (nr_sectors < 0)
-			s = -s;
-
-		atomic_add(s, d->stripe_sectors_dirty + stripe);
-		nr_sectors -= s;
-		stripe_offset = 0;
-		stripe++;
-	}
-}
-
 /* Background writeback - IO loop */
 
 static void dirty_io_destructor(struct closure *cl)
@@ -378,30 +219,33 @@ static void read_dirty_submit(struct closure *cl)
 	continue_at(cl, write_dirty, system_wq);
 }
 
-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	unsigned delay = writeback_delay(dc, 0);
+	unsigned delay = 0;
 	struct keybuf_key *w;
 	struct dirty_io *io;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	/*
 	 * XXX: if we error, background writeback just spins. Should use some
 	 * mempools.
 	 */
 
-	while (1) {
+	while (!kthread_should_stop()) {
+		try_to_freeze();
+
 		w = bch_keybuf_next(&dc->writeback_keys);
 		if (!w)
 			break;
 
 		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
 
-		if (delay > 0 &&
-		    (KEY_START(&w->key) != dc->last_read ||
-		     jiffies_to_msecs(delay) > 50))
-			delay = schedule_timeout_uninterruptible(delay);
+		if (KEY_START(&w->key) != dc->last_read ||
+		    jiffies_to_msecs(delay) > 50)
+			while (!kthread_should_stop() && delay)
+				delay = schedule_timeout_interruptible(delay);
 
 		dc->last_read	= KEY_OFFSET(&w->key);
 
@@ -427,7 +271,7 @@ static void read_dirty(struct closure *cl)
 		trace_bcache_writeback(&w->key);
 
 		down(&dc->in_flight);
-		closure_call(&io->cl, read_dirty_submit, NULL, cl);
+		closure_call(&io->cl, read_dirty_submit, NULL, &cl);
 
 		delay = writeback_delay(dc, KEY_SIZE(&w->key));
 	}
@@ -443,7 +287,148 @@ err:
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
-	continue_at(cl, refill_dirty, dirty_wq);
+	closure_sync(&cl);
+}
+
+/* Scan for dirty data */
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+				  uint64_t offset, int nr_sectors)
+{
+	struct bcache_device *d = c->devices[inode];
+	unsigned stripe_offset;
+	uint64_t stripe = offset;
+
+	if (!d)
+		return;
+
+	do_div(stripe, d->stripe_size);
+
+	stripe_offset = offset & (d->stripe_size - 1);
+
+	while (nr_sectors) {
+		int s = min_t(unsigned, abs(nr_sectors),
+			      d->stripe_size - stripe_offset);
+
+		if (nr_sectors < 0)
+			s = -s;
+
+		atomic_add(s, d->stripe_sectors_dirty + stripe);
+		nr_sectors -= s;
+		stripe_offset = 0;
+		stripe++;
+	}
+}
+
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+{
+	return KEY_DIRTY(k);
+}
+
+static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
+{
+	uint64_t stripe = KEY_START(k);
+	unsigned nr_sectors = KEY_SIZE(k);
+	struct cached_dev *dc = container_of(buf, struct cached_dev,
+					     writeback_keys);
+
+	if (!KEY_DIRTY(k))
+		return false;
+
+	do_div(stripe, dc->disk.stripe_size);
+
+	while (1) {
+		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
+		    dc->disk.stripe_size)
+			return true;
+
+		if (nr_sectors <= dc->disk.stripe_size)
+			return false;
+
+		nr_sectors -= dc->disk.stripe_size;
+		stripe++;
+	}
+}
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	bool searched_from_start = false;
+	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+
+	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+		searched_from_start = true;
+	}
+
+	if (dc->partial_stripes_expensive) {
+		uint64_t i;
+
+		for (i = 0; i < dc->disk.nr_stripes; i++)
+			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
+			    dc->disk.stripe_size)
+				goto full_stripes;
+
+		goto normal_refill;
+full_stripes:
+		searched_from_start = false;	/* not searching entire btree */
+		bch_refill_keybuf(dc->disk.c, buf, &end,
+				  dirty_full_stripe_pred);
+	} else {
+normal_refill:
+		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+	}
+
+	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+	struct cached_dev *dc = arg;
+	bool searched_full_index;
+
+	while (!kthread_should_stop()) {
+		down_write(&dc->writeback_lock);
+		if (!atomic_read(&dc->has_dirty) ||
+		    (!atomic_read(&dc->disk.detaching) &&
+		     !dc->writeback_running)) {
+			up_write(&dc->writeback_lock);
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			if (kthread_should_stop())
+				return 0;
+
+			try_to_freeze();
+			schedule();
+			continue;
+		}
+
+		searched_full_index = refill_dirty(dc);
+
+		if (searched_full_index &&
+		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+			atomic_set(&dc->has_dirty, 0);
+			cached_dev_put(dc);
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		up_write(&dc->writeback_lock);
+
+		bch_ratelimit_reset(&dc->writeback_rate);
+		read_dirty(dc);
+
+		if (searched_full_index) {
+			unsigned delay = dc->writeback_delay * HZ;
+
+			while (delay &&
+			       !kthread_should_stop() &&
+			       !atomic_read(&dc->disk.detaching))
+				delay = schedule_timeout_interruptible(delay);
+		}
+	}
+
+	return 0;
+}
 
 /* Init */
 
@@ -483,12 +468,10 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
 }
 
-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
 	sema_init(&dc->in_flight, 64);
-	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);
 
 	bch_keybuf_init(&dc->writeback_keys);
 
 	dc->writeback_metadata	= true;
@@ -502,22 +485,16 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
 	dc->writeback_rate_p_term_inverse = 64;
 	dc->writeback_rate_d_smooth	= 8;
 
+	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+					      "bcache_writeback");
+	if (IS_ERR(dc->writeback_thread))
+		return PTR_ERR(dc->writeback_thread);
+
+	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
+
 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
-	if (dirty_wq)
-		destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
-	dirty_wq = create_workqueue("bcache_writeback");
-	if (!dirty_wq)
-		return -ENOMEM;
 
 	return 0;
 }
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -56,11 +56,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	       in_use <= CUTOFF_WRITEBACK;
 }
 
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+	wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+	if (!atomic_read(&dc->has_dirty) &&
+	    !atomic_xchg(&dc->has_dirty, 1)) {
+		atomic_inc(&dc->count);
+
+		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+			/* XXX: should do this synchronously */
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		bch_writeback_queue(dc);
+	}
+}
+
 void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);
 
 void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);
 
 #endif
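One design note, since it is spread across the files above: the producer side
in the new inline bch_writeback_add() uses atomic_xchg() so that exactly one
caller wins the 0-to-1 transition of has_dirty, and only that winner takes
the device refcount, flips the superblock state, and wakes the thread.
Teardown in cached_dev_free() is just kthread_stop(), which also wakes a
parked thread so it can observe kthread_should_stop(). The same
claim-then-wake idiom, expressed against the hypothetical wb_state sketched
near the top (not bcache API):

/*
 * Claim-then-wake: the cheap atomic_read() avoids dirtying the cache line
 * on every write, and atomic_xchg() guarantees a single winner does the
 * one-time setup before waking the thread.
 */
static inline void wb_mark_dirty(struct wb_state *s)
{
	if (!atomic_read(&s->has_dirty) &&
	    !atomic_xchg(&s->has_dirty, 1)) {
		/* one-time work (refcounts, superblock state) goes here */
		wake_up_process(s->thread);
	}
}

/* Teardown: kthread_stop() wakes the thread and waits for it to return. */
static inline void wb_stop(struct wb_state *s)
{
	kthread_stop(s->thread);
}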