From 08239ca2a053dbc3b082916bdfbd88e5a9ad9267 Mon Sep 17 00:00:00 2001
From: Wei Yongjun
Date: Thu, 28 Nov 2013 10:31:35 +0800
Subject: [PATCH 01/20] bcache: fix sparse non static symbol warning

Fixes the following sparse warning:

drivers/md/bcache/btree.c:2220:5: warning: symbol 'btree_insert_fn' was not declared. Should it be static?

Signed-off-by: Wei Yongjun
Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/btree.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..dc6e2be265b7 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2217,7 +2217,7 @@ struct btree_insert_op {
 	struct bkey	*replace_key;
 };
 
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
 {
 	struct btree_insert_op *op = container_of(b_op,
 					struct btree_insert_op, op);

From 8515736604941334bd9e8fc01edea685a228acd5 Mon Sep 17 00:00:00 2001
From: Andrey Vagin
Date: Fri, 6 Dec 2013 09:06:41 +0400
Subject: [PATCH 02/20] block: fix memory leaks on unplugging block device

All objects, which are allocated in blk_mq_register_disk, must be
released in blk_mq_unregister_disk.

I use a KVM virtual machine and virtio disk to reproduce this issue.

kmemleak: 18 new suspected memory leaks (see /sys/kernel/debug/kmemleak)
$ cat /sys/kernel/debug/kmemleak | head -n 30
unreferenced object 0xffff8800b6636150 (size 8):
  comm "kworker/0:2", pid 65, jiffies 4294809903 (age 86.358s)
  hex dump (first 8 bytes):
    76 69 72 74 69 6f 34 00                          virtio4.
  backtrace:
    [] kmemleak_alloc+0x4e/0xb0
    [] __kmalloc_track_caller+0xf5/0x260
    [] kstrdup+0x31/0x60
    [] sysfs_new_dirent+0x2e/0x140
    [] create_dir+0x38/0xe0
    [] sysfs_create_dir_ns+0x73/0xc0
    [] kobject_add_internal+0xc9/0x340
    [] kobject_add+0x65/0xb0
    [] device_add+0x128/0x660
    [] device_register+0x1a/0x20
    [] register_virtio_device+0x98/0xe0
    [] virtio_pci_probe+0x12e/0x1c0
    [] local_pci_probe+0x45/0xa0
    [] pci_device_probe+0x121/0x130
    [] driver_probe_device+0x87/0x390
    [] __device_attach+0x3b/0x40
unreferenced object 0xffff8800b65aa1d8 (size 144):

Fixes: 320ae51feed5 (blk-mq: new multi-queue block IO queueing mechanism)
Cc: Jens Axboe
Signed-off-by: Andrey Vagin
Signed-off-by: Jens Axboe
---
 block/blk-mq-sysfs.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e9aa0a..b91ce75bd35d 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
 void blk_mq_unregister_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	int i, j;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		hctx_for_each_ctx(hctx, ctx, j) {
+			kobject_del(&ctx->kobj);
+			kobject_put(&ctx->kobj);
+		}
+		kobject_del(&hctx->kobj);
+		kobject_put(&hctx->kobj);
+	}
 
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
+	kobject_put(&q->mq_kobj);
 
 	kobject_put(&disk_to_dev(disk)->kobj);
 }
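The fix above restores the standard kobject lifetime pairing: every kobject
registered in the sysfs tree must be removed with kobject_del() and have its
reference dropped with kobject_put() on teardown. A minimal sketch of that
pairing, with a hypothetical foo_obj type and foo_ktype (not code from this
patch):

	#include <linux/kobject.h>

	struct foo_obj {
		struct kobject kobj;
	};

	static int foo_register(struct foo_obj *f, struct kobject *parent)
	{
		int ret = kobject_init_and_add(&f->kobj, &foo_ktype, parent, "foo");
		if (ret)
			kobject_put(&f->kobj);	/* add failed: drop the initial ref */
		return ret;
	}

	static void foo_unregister(struct foo_obj *f)
	{
		kobject_del(&f->kobj);	/* remove from sysfs */
		kobject_put(&f->kobj);	/* drop the last ref; the ktype release frees */
	}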
From f665c0f852316ff99e9eb7f71f34d43003f8e139 Mon Sep 17 00:00:00 2001
From: Stefan Priebe
Date: Sat, 16 Nov 2013 21:26:52 +0100
Subject: [PATCH 03/20] bcache: kthread: don't set writeback task to
 TASK_INTERRUPTIBLE at the beginning

schedule_timeout_interruptible() and the other sleep helpers set the
task state on their own, so don't set the writeback task to
TASK_INTERRUPTIBLE up front. This prevents a wrong load average
calculation (a load of 1 per thread).

Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/writeback.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..484e57d7012c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -500,8 +500,6 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
 	if (IS_ERR(dc->writeback_thread))
 		return PTR_ERR(dc->writeback_thread);
 
-	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
-
 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 	schedule_delayed_work(&dc->writeback_rate_update,
 			      dc->writeback_rate_update_seconds * HZ);

From ce2b3f595e1c56639085645e0130426e443008c0 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 28 Nov 2013 17:28:37 -0800
Subject: [PATCH 04/20] bcache: Use uninterruptible sleep in writeback

We're just waiting on kthread_should_stop(), nothing else, so
interruptible sleep was wrong here.

Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/writeback.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 484e57d7012c..3cd931d3f26c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -241,7 +241,7 @@ static void read_dirty(struct cached_dev *dc)
 		if (KEY_START(&w->key) != dc->last_read ||
 		    jiffies_to_msecs(delay) > 50)
 			while (!kthread_should_stop() && delay)
-				delay = schedule_timeout_interruptible(delay);
+				delay = schedule_timeout_uninterruptible(delay);
 
 		dc->last_read	= KEY_OFFSET(&w->key);
 
@@ -438,7 +438,7 @@ static int bch_writeback_thread(void *arg)
 			while (delay &&
 			       !kthread_should_stop() &&
 			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
-				delay = schedule_timeout_interruptible(delay);
+				delay = schedule_timeout_uninterruptible(delay);
 		}
 	}
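For context, the two helpers differ only in the task state they set before
sleeping, roughly (a simplified sketch of the kernel helpers):

	signed long schedule_timeout_interruptible(signed long timeout)
	{
		__set_current_state(TASK_INTERRUPTIBLE);
		return schedule_timeout(timeout);
	}

	signed long schedule_timeout_uninterruptible(signed long timeout)
	{
		__set_current_state(TASK_UNINTERRUPTIBLE);
		return schedule_timeout(timeout);
	}

In TASK_INTERRUPTIBLE a pending signal ends the sleep immediately; a kthread
that is only waiting on kthread_should_stop() does no signal handling, so
TASK_UNINTERRUPTIBLE is the appropriate state here.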
From d24a6e1087030b6da286df9433add5fa2f21b83b Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sun, 10 Nov 2013 21:55:27 -0800
Subject: [PATCH 05/20] bcache: Fix dirty_data accounting

Dirty data accounting wasn't quite right - firstly, we were adding the
key we're inserting after it could have merged with another dirty key
already in the btree, and secondly we could sometimes pass the wrong
offset to bcache_dev_sectors_dirty_add() for dirty data we were
overwriting - which is important when tracking dirty data by stripe.

NOTE FOR BACKPORTERS: For 3.10 (and 3.11?) there are other accounting
fixes necessary that got squashed in with other patches; the full patch
against 3.10 is 408cc2f47eeac93a, available at:
git://evilpiepirate.org/~kent/linux-bcache.git bcache-3.10-writeback-fixes

Signed-off-by: Kent Overstreet
Cc: linux-stable # >= v3.10

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 2a46036..4a12b2f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1817,7 +1817,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 		if (KEY_START(k) > KEY_START(insert) + sectors_found)
 			goto check_failed;
 
-		if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+		if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+		    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
 			goto check_failed;
 
 		/* skip past gen */
---
 drivers/md/bcache/btree.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index dc6e2be265b7..c36745e5986a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1817,7 +1817,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 		if (KEY_START(k) > KEY_START(insert) + sectors_found)
 			goto check_failed;
 
-		if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+		if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+		    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
 			goto check_failed;
 
 		/* skip past gen */

From 9eb8ebeb2471b8cbaa078066aeb85fe5d46f5304 Mon Sep 17 00:00:00 2001
From: Nicholas Swenson
Date: Tue, 22 Oct 2013 13:19:23 -0700
Subject: [PATCH 06/20] bcache: Fix for can_attach_cache()

Signed-off-by: Nicholas Swenson
Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/super.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd2d797..c57bfa071a57 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
 {
 	return ca->sb.block_size	== c->sb.block_size &&
-		ca->sb.bucket_size	== c->sb.block_size &&
+		ca->sb.bucket_size	== c->sb.bucket_size &&
 		ca->sb.nr_in_set	== c->sb.nr_in_set;
 }

From 97d11a660fd906dbea3dccd2638495d8497c3c81 Mon Sep 17 00:00:00 2001
From: Nicholas Swenson
Date: Wed, 23 Oct 2013 17:35:26 -0700
Subject: [PATCH 07/20] bcache: Fix heap_peek() macro

Signed-off-by: Nicholas Swenson
Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/util.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3f8b4a..1030c6020e98 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do {								\
 	_r;							\
 })
 
-#define heap_peek(h)	((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)
 
 #define heap_full(h)	((h)->used == (h)->size)
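The bug is easy to see from the heap's fields: size is the allocated
capacity and used is the number of elements currently stored (note that
heap_full() compares the two), so the old macro returned data[0] even for an
allocated-but-empty heap. A self-contained sketch of the relationship,
mirroring the field names from bcache's util.h:

	struct example_heap {
		size_t size;		/* capacity allocated for data[] */
		size_t used;		/* elements currently on the heap */
		struct bucket **data;	/* data[0] is the top of the heap */
	};

	#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)
	#define heap_full(h)	((h)->used == (h)->size)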
From bee63f40cb5f5e8ab2abfbc85acde99cc0acd4b5 Mon Sep 17 00:00:00 2001
From: Nicholas Swenson
Date: Thu, 31 Oct 2013 19:25:18 -0700
Subject: [PATCH 08/20] bcache: fix for gc crashing when no sectors are used

Signed-off-by: Nicholas Swenson
Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/movinggc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..46c952379fab 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -184,7 +184,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
 
 static unsigned bucket_heap_top(struct cache *ca)
 {
-	return GC_SECTORS_USED(heap_peek(&ca->heap));
+	struct bucket *b;
+	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
 }
 
 void bch_moving_gc(struct cache_set *c)

From 981aa8c091e164ea51dd1e81b71a1f3852bbcceb Mon Sep 17 00:00:00 2001
From: Nicholas Swenson
Date: Thu, 7 Nov 2013 17:53:19 -0800
Subject: [PATCH 09/20] bcache: bugfix - moving_gc now moves only correct
 buckets

Removed gc_move_threshold because picking buckets only by threshold
could lead to moving extra buckets (i.e. if there are buckets at the
threshold that aren't supposed to be moved due to space considerations).

This is replaced by a GC_MOVE bit in the gc_mark bitmask. Now only
marked buckets get moved.

Signed-off-by: Nicholas Swenson
Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/alloc.c    | 2 ++
 drivers/md/bcache/bcache.h   | 6 +++---
 drivers/md/bcache/movinggc.c | 8 +++-----
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
 
 	if (watermark <= WATERMARK_METADATA) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
 	} else {
 		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
 		b->prio = INITIAL_PRIO;
 	}
 
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..a7b1a7631ed2 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
 	uint8_t		disk_gen;
 	uint8_t		last_gc; /* Most out of date gen in the btree */
 	uint8_t		gc_gen;
-	uint16_t	gc_mark;
+	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 };
 
 /*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
 #include "stats.h"
@@ -445,7 +446,6 @@ struct cache {
 	 * call prio_write() to keep gens from wrapping.
 	 */
 	uint8_t		need_save_prio;
-	unsigned	gc_move_threshold;
 
 	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 46c952379fab..30f347d4e609 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
 	unsigned i;
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
-		struct cache *ca = PTR_CACHE(c, k, i);
 		struct bucket *g = PTR_BUCKET(c, k, i);
 
-		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+		if (GC_MOVE(g))
 			return true;
 	}
 
@@ -227,9 +226,8 @@ void bch_moving_gc(struct cache_set *c)
 			sectors_to_move -= GC_SECTORS_USED(b);
 		}
 
-		ca->gc_move_threshold = bucket_heap_top(ca);
-
-		pr_debug("threshold %u", ca->gc_move_threshold);
+		while (heap_pop(&ca->heap, b, bucket_cmp))
+			SET_GC_MOVE(b, 1);
 	}
 
 	mutex_unlock(&c->bucket_lock);
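For orientation, the BITMASK() definitions above pack the 16-bit gc_mark as
bits 0-1 for GC_MARK, bits 2-14 for GC_SECTORS_USED (shrunk from 14 to 13
bits to make room), and bit 15 for the new GC_MOVE flag. A plain-C sketch of
the same packing, with hypothetical helpers rather than the bcache macros:

	#include <stdint.h>

	/* bit 15 of gc_mark: bucket selected for moving gc */
	#define GC_MOVE_SHIFT	15

	static inline unsigned gc_move(uint16_t gc_mark)
	{
		return (gc_mark >> GC_MOVE_SHIFT) & 1;
	}

	static inline uint16_t set_gc_move(uint16_t gc_mark, unsigned v)
	{
		return (uint16_t)((gc_mark & ~(1u << GC_MOVE_SHIFT)) |
				  ((v & 1u) << GC_MOVE_SHIFT));
	}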
From bf0a628a95dba7f983b6047cea695fb066fb2512 Mon Sep 17 00:00:00 2001
From: Nicholas Swenson
Date: Tue, 26 Nov 2013 19:14:23 -0800
Subject: [PATCH 10/20] bcache: fix for gc and writeback race

The garbage collector needs to check keys in the writeback keybuf to
make sure it's not invalidating buckets to which the writeback keys
point.

Signed-off-by: Nicholas Swenson
Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/btree.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index c36745e5986a..31bb53fcc67a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
 			SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
 				    GC_MARK_METADATA);
 
+	/* don't reclaim buckets to which writeback keys point */
+	rcu_read_lock();
+	for (i = 0; i < c->nr_uuids; i++) {
+		struct bcache_device *d = c->devices[i];
+		struct cached_dev *dc;
+		struct keybuf_key *w, *n;
+		unsigned j;
+
+		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+			continue;
+		dc = container_of(d, struct cached_dev, disk);
+
+		spin_lock(&dc->writeback_keys.lock);
+		rbtree_postorder_for_each_entry_safe(w, n,
+					&dc->writeback_keys.keys, node)
+			for (j = 0; j < KEY_PTRS(&w->key); j++)
+				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+					    GC_MARK_DIRTY);
+		spin_unlock(&dc->writeback_keys.lock);
+	}
+	rcu_read_unlock();
+
 	for_each_cache(ca, c, i) {
 		uint64_t *i;

From 6d3d1a9c542b19dff1c7d7c8354d0869e4655287 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 16 Dec 2013 14:12:09 -0800
Subject: [PATCH 11/20] bcache: bugfix for race between moving_gc and
 bucket_invalidate

There is a possibility for a bucket to be invalidated by the allocator
while moving_gc is copying its contents to another bucket, if the
bucket only holds cached data. To prevent this, moving_gc checks for a
stale ptr (one pointing to an invalidated bucket) before and after
reads. If it finds one, it simply ignores moving that data.

This only affects bcache if moving_gc is turned on; note that it's off
by default.

Signed-off-by: Nicholas Swenson
Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/movinggc.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 30f347d4e609..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -64,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
 
 static void read_moving_endio(struct bio *bio, int error)
 {
+	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
 
 	if (error)
 		io->op.error = error;
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(io->op.c, &b->key, 0)) {
+		io->op.error = -EINTR;
+	}
 
 	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
 }
@@ -140,6 +145,11 @@ static void read_moving(struct cache_set *c)
 		if (!w)
 			break;
 
+		if (ptr_stale(c, &w->key, 0)) {
+			bch_keybuf_del(&c->moving_gc_keys, w);
+			continue;
+		}
+
 		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
 			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
 			     GFP_KERNEL);
From 16749c23c00c686ed168471963e3ddb0f3fcd855 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 11 Nov 2013 13:58:34 -0800
Subject: [PATCH 12/20] bcache: New writeback PD controller

The old writeback PD controller could get into states where it had
throttled all the way down and take way too long to recover - it was
too complicated to really understand what it was doing.

This rewrites a good chunk of it to hopefully be simpler and make more
sense, and it also pays more attention to units which should make the
behaviour a bit easier to understand.

Signed-off-by: Kent Overstreet
---
 drivers/md/bcache/bcache.h    |  6 ++---
 drivers/md/bcache/sysfs.c     | 50 ++++++++++++++++++++---------------
 drivers/md/bcache/util.c      |  8 +++++-
 drivers/md/bcache/writeback.c | 47 ++++++++++++++++----------------
 4 files changed, 62 insertions(+), 49 deletions(-)

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index a7b1a7631ed2..754f43177483 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -373,14 +373,14 @@ struct cached_dev {
 	unsigned char		writeback_percent;
 	unsigned		writeback_delay;
 
-	int			writeback_rate_change;
-	int64_t			writeback_rate_derivative;
 	uint64_t		writeback_rate_target;
+	int64_t			writeback_rate_proportional;
+	int64_t			writeback_rate_derivative;
+	int64_t			writeback_rate_change;
 
 	unsigned		writeback_rate_update_seconds;
 	unsigned		writeback_rate_d_term;
 	unsigned		writeback_rate_p_term_inverse;
-	unsigned		writeback_rate_d_smooth;
 };
 
 enum alloc_watermarks {
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2bee18a..a1f85612f0b3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
 rw_attribute(writeback_rate_update_seconds);
 rw_attribute(writeback_rate_d_term);
 rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
 read_attribute(writeback_rate_debug);
 
 read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
 	var_printf(writeback_running,	"%i");
 	var_print(writeback_delay);
 	var_print(writeback_percent);
-	sysfs_print(writeback_rate,	dc->writeback_rate.rate);
+	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);
 	var_print(writeback_rate_update_seconds);
 	var_print(writeback_rate_d_term);
 	var_print(writeback_rate_p_term_inverse);
-	var_print(writeback_rate_d_smooth);
 
 	if (attr == &sysfs_writeback_rate_debug) {
+		char rate[20];
 		char dirty[20];
-		char derivative[20];
 		char target[20];
-		bch_hprint(dirty,
-			   bcache_dev_sectors_dirty(&dc->disk) << 9);
-		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
+		char proportional[20];
+		char derivative[20];
+		char change[20];
+		s64 next_io;
+
+		bch_hprint(rate,	dc->writeback_rate.rate << 9);
+		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
 		bch_hprint(target,	dc->writeback_rate_target << 9);
+		bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
+		bch_hprint(change,	dc->writeback_rate_change << 9);
+
+		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+				    NSEC_PER_MSEC);
 
 		return sprintf(buf,
-			       "rate:\t\t%u\n"
-			       "change:\t\t%i\n"
+			       "rate:\t\t%s/sec\n"
 			       "dirty:\t\t%s\n"
+			       "target:\t\t%s\n"
+			       "proportional:\t%s\n"
 			       "derivative:\t%s\n"
-			       "target:\t\t%s\n",
-			       dc->writeback_rate.rate,
-			       dc->writeback_rate_change,
-			       dirty, derivative, target);
+			       "change:\t\t%s/sec\n"
+			       "next io:\t%llims\n",
+			       rate, dirty, target, proportional,
+			       derivative, change, next_io);
 	}
 
 	sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
 	struct kobj_uevent_env *env;
 
 #define d_strtoul(var)		sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
 #define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
 
 	sysfs_strtoul(data_csum,	dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
 	d_strtoul(writeback_metadata);
 	d_strtoul(writeback_running);
 	d_strtoul(writeback_delay);
-	sysfs_strtoul_clamp(writeback_rate,
-			    dc->writeback_rate.rate, 1, 1000000);
+
 	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
 
-	d_strtoul(writeback_rate_update_seconds);
+	sysfs_strtoul_clamp(writeback_rate,
+			    dc->writeback_rate.rate, 1, INT_MAX);
+
+	d_strtoul_nonzero(writeback_rate_update_seconds);
 	d_strtoul(writeback_rate_d_term);
-	d_strtoul(writeback_rate_p_term_inverse);
-	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
-			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
-	d_strtoul(writeback_rate_d_smooth);
+	d_strtoul_nonzero(writeback_rate_p_term_inverse);
 
 	d_strtoi_h(sequential_cutoff);
 	d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
 	&sysfs_writeback_rate_update_seconds,
 	&sysfs_writeback_rate_d_term,
 	&sysfs_writeback_rate_p_term_inverse,
-	&sysfs_writeback_rate_d_smooth,
 	&sysfs_writeback_rate_debug,
 	&sysfs_dirty_data,
 	&sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..bb37618e7664 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
 	uint64_t now = local_clock();
 
-	d->next += div_u64(done, d->rate);
+	d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+	if (time_before64(now + NSEC_PER_SEC, d->next))
+		d->next = now + NSEC_PER_SEC;
+
+	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+		d->next = now - NSEC_PER_SEC * 2;
 
 	return time_after64(d->next, now)
 		? div_u64(d->next - now, NSEC_PER_SEC / HZ)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3cd931d3f26c..6c44fe059c27 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
 
 	/* PD controller */
 
-	int change = 0;
-	int64_t error;
 	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
 	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+	int64_t proportional = dirty - target;
+	int64_t change;
 
 	dc->disk.sectors_dirty_last = dirty;
 
-	derivative *= dc->writeback_rate_d_term;
-	derivative = clamp(derivative, -dirty, dirty);
+	/* Scale to sectors per second */
+
+	proportional *= dc->writeback_rate_update_seconds;
+	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
+
+	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
 
 	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
-			      dc->writeback_rate_d_smooth, 0);
+			      (dc->writeback_rate_d_term /
+			       dc->writeback_rate_update_seconds) ?: 1, 0);
 
-	/* Avoid divide by zero */
-	if (!target)
-		goto out;
+	derivative *= dc->writeback_rate_d_term;
+	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
 
-	error = div64_s64((dirty + derivative - target) << 8, target);
-
-	change = div_s64((dc->writeback_rate.rate * error) >> 8,
-			 dc->writeback_rate_p_term_inverse);
+	change = proportional + derivative;
 
 	/* Don't increase writeback rate if the device isn't keeping up */
 	if (change > 0 &&
 	    time_after64(local_clock(),
-			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+			 dc->writeback_rate.next + NSEC_PER_MSEC))
 		change = 0;
 
 	dc->writeback_rate.rate =
-		clamp_t(int64_t, dc->writeback_rate.rate + change,
+		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
 			1, NSEC_PER_MSEC);
-out:
+
+	dc->writeback_rate_proportional = proportional;
 	dc->writeback_rate_derivative = derivative;
 	dc->writeback_rate_change = change;
 	dc->writeback_rate_target = target;
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
-	uint64_t ret;
-
 	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    !dc->writeback_percent)
 		return 0;
 
-	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
-	return min_t(uint64_t, ret, HZ);
+	return bch_next_delay(&dc->writeback_rate, sectors);
 }
 
 struct dirty_io {
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 
 	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
 			   sectors_dirty_init_fn, 0);
+
+	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
 }
 
 int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,10 +490,9 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
 	dc->writeback_delay	= 30;
 	dc->writeback_rate.rate	= 1024;
 
-	dc->writeback_rate_update_seconds = 30;
-	dc->writeback_rate_d_term	= 16;
-	dc->writeback_rate_p_term_inverse = 64;
-	dc->writeback_rate_d_smooth	= 8;
+	dc->writeback_rate_update_seconds = 5;
+	dc->writeback_rate_d_term	= 30;
+	dc->writeback_rate_p_term_inverse = 6000;
 
 	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
 					      "bcache_writeback");
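The controller above is ordinary proportional-derivative control, just
expressed in sectors per second. A freestanding sketch of the arithmetic
(userspace C with hypothetical names; the EWMA smoothing of the derivative
and the "device not keeping up" check are left out):

	#include <stdint.h>

	static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	/* one PD update step in the style of the patch, in sectors/sec */
	static int64_t pd_update(int64_t rate, int64_t dirty, int64_t *last_dirty,
				 int64_t target, int64_t update_seconds,
				 int64_t p_term_inverse, int64_t d_term)
	{
		int64_t proportional = dirty - target;		/* error in sectors */
		int64_t derivative = dirty - *last_dirty;	/* growth since last update */

		*last_dirty = dirty;

		/* scale both terms to sectors per second */
		proportional = proportional * update_seconds / p_term_inverse;
		derivative = derivative / update_seconds;
		derivative = derivative * d_term / p_term_inverse;

		return clamp64(rate + proportional + derivative, 1, 1000000);
	}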
From a26ba7faddd51be3dd8957543a9d984a4ddd104a Mon Sep 17 00:00:00 2001
From: Rashika Kheria
Date: Thu, 19 Dec 2013 15:02:22 +0530
Subject: [PATCH 13/20] drivers: block: Mark the functions as static in
 skd_main.c
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Mark functions skd_skmsg_state_to_str() and skd_skreq_state_to_str() as
static in skd_main.c because they are not used outside this file.

This eliminates the following warnings in skd_main.c:
drivers/block/skd_main.c:5272:13: warning: no previous prototype for ‘skd_skmsg_state_to_str’ [-Wmissing-prototypes]
drivers/block/skd_main.c:5284:13: warning: no previous prototype for ‘skd_skreq_state_to_str’ [-Wmissing-prototypes]

Signed-off-by: Rashika Kheria
Reviewed-by: Josh Triplett
Signed-off-by: Jens Axboe
---
 drivers/block/skd_main.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
 	}
 }
 
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 {
 	switch (state) {
 	case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 	}
 }
 
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
 {
 	switch (state) {
 	case SKD_REQ_STATE_IDLE:

From 0c56010c83703e1f33325838eda9a2077827b6f1 Mon Sep 17 00:00:00 2001
From: Matias Bjorling
Date: Tue, 10 Dec 2013 16:50:38 +0100
Subject: [PATCH 14/20] null_blk: mem garbage on NUMA systems during init

On NUMA systems, the blk-mq layer is initialized with a per-node hctx.
We initialize submit_queues to 1, while blk-mq's nr_hw_queues is
initialized to the number of NUMA nodes.

This makes null_init_hctx() overwrite memory outside of what it
allocated. In my case it led to writing garbage into struct
request_queue's mq_map.

Signed-off-by: Matias Bjorling
Cc: Jens Axboe
Signed-off-by: Linus Torvalds
---
 drivers/block/null_blk.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ea192ec029c4..f370fc13aea5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -495,23 +495,23 @@ static int null_add_dev(void)
 
 	spin_lock_init(&nullb->lock);
 
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
+		submit_queues = nr_online_nodes;
+
 	if (setup_queues(nullb))
 		goto err;
 
 	if (queue_mode == NULL_Q_MQ) {
 		null_mq_reg.numa_node = home_node;
 		null_mq_reg.queue_depth = hw_queue_depth;
+		null_mq_reg.nr_hw_queues = submit_queues;
 
 		if (use_per_node_hctx) {
 			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
 			null_mq_reg.ops->free_hctx = null_free_hctx;
-
-			null_mq_reg.nr_hw_queues = nr_online_nodes;
 		} else {
 			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
 			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
-
-			null_mq_reg.nr_hw_queues = submit_queues;
 		}
 
 		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
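The bug class is a plain size mismatch: the queues array is sized from one
variable while the init callback is invoked according to another. A
deliberately broken userspace illustration (hypothetical values, not the
driver code):

	#include <stdlib.h>

	struct queue { int depth; };

	int main(void)
	{
		int submit_queues = 1;	/* size the allocation was based on */
		int nr_hw_queues = 4;	/* times the init hook is invoked */
		struct queue *queues = calloc(submit_queues, sizeof(*queues));

		for (int i = 0; i < nr_hw_queues; i++)
			queues[i].depth = 64;	/* i >= 1 writes past the allocation */

		free(queues);
		return 0;
	}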
From 12f8f4fc0314103d47f9b1cbc812597b8d893ce1 Mon Sep 17 00:00:00 2001
From: Matias Bjorling
Date: Wed, 18 Dec 2013 13:41:42 +0100
Subject: [PATCH 15/20] null_blk: documentation

Add description of module and its parameters.

Signed-off-by: Matias Bjorling
Signed-off-by: Jens Axboe
---
 Documentation/block/null_blk.txt | 71 ++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)
 create mode 100644 Documentation/block/null_blk.txt

diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644
index 000000000000..9e1b047fd13d
--- /dev/null
+++ b/Documentation/block/null_blk.txt
@@ -0,0 +1,71 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+  Single-queue block-layer
+    - Request-based.
+    - Single submission queue per device.
+    - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+  Multi-queue block-layer
+    - Request-based.
+    - Configurable submission queues per device.
+  No block-layer (Known as bio-based)
+    - Bio-based. IO requests are submitted directly to the device driver.
+    - Directly accepts bio data structure and returns them.
+
+All of them has a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+  Selects which block-layer the module should instantiate with.
+
+  0: Bio-based.
+  1: Single-queue.
+  2: Multi-queue.
+
+home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
+  Selects what socket the data structures is allocated from.
+
+gb=[Size in GB]: Default: 250GB
+  The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+  The block size reported to the system.
+
+nr_devices=[Num. devices]: Default: 2
+  Number of block devices instantiated. They are instantiated as /dev/nullb0,
+  etc.
+
+irq_mode=[0-2]: Default: Soft-irq
+  The completion mode used for completing IOs to the block-layer.
+
+  0: None.
+  1: Soft-irq. Uses ipi to complete IOs across sockets. Simulates the overhead
+     when IOs are issued from another socket than the home the device is
+     connected to.
+  2: Timer: Waits a specific period (completion_nsec) for each IO before
+     completion.
+
+completion_nsec=[Num. ns]: Default: 10.000ns
+  Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+  The number of submission queues attached to the device driver. If unset, it
+  defaults to 1 on single-queue and bio-based instances. For multi-queue,
+  its ignored when use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Defaults: 64
+  The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Defaults: 1
+  If 1, the multi-queue block layer is instantiated with a hardware dispatch
+  queue for each CPU node in the system. If 0, it is instantiated with the
+  number of queues defined in the submit_queues parameter.

From 2d263a7856cbaf26dd89b671e2161c4a49f8461b Mon Sep 17 00:00:00 2001
From: Matias Bjorling
Date: Wed, 18 Dec 2013 13:41:43 +0100
Subject: [PATCH 16/20] null_blk: refactor init and init errors code paths

Simplify the initialization logic of the three block-layers.

- The queue initialization is split into two parts. This allows reuse
  of code when initializing the sq-, bio- and mq-based layers.
- Set submit_queues default value to 0 and always set it at init time.
- Simplify the init error code paths.
Signed-off-by: Matias Bjorling
Signed-off-by: Jens Axboe
---
 drivers/block/null_blk.c | 63 ++++++++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 25 deletions(-)

diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..f0aeb2a7a9ca 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -65,7 +65,7 @@ enum {
 	NULL_Q_MQ		= 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -355,16 +355,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
 	kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int index)
 {
 	struct nullb *nullb = data;
 	struct nullb_queue *nq = &nullb->queues[index];
 
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-	nullb->nr_queues++;
 	hctx->driver_data = nq;
+	null_init_queue(nullb, nq);
+	nullb->nr_queues++;
 
 	return 0;
 }
@@ -417,13 +425,13 @@ static int setup_commands(struct nullb_queue *nq)
 
 	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
 	if (!nq->cmds)
-		return 1;
+		return -ENOMEM;
 
 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
 	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
 	if (!nq->tag_map) {
 		kfree(nq->cmds);
-		return 1;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +462,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-	struct nullb_queue *nq;
-	int i;
-
-	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+				GFP_KERNEL);
 	if (!nullb->queues)
-		return 1;
+		return -ENOMEM;
 
 	nullb->nr_queues = 0;
 	nullb->queue_depth = hw_queue_depth;
 
-	if (queue_mode == NULL_Q_MQ)
-		return 0;
+	return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i, ret = 0;
 
 	for (i = 0; i < submit_queues; i++) {
 		nq = &nullb->queues[i];
-		init_waitqueue_head(&nq->wait);
-		nq->queue_depth = hw_queue_depth;
 
-		if (setup_commands(nq))
-			break;
+		null_init_queue(nullb, nq);
+
+		ret = setup_commands(nq);
+		if (ret)
+			goto err_queue;
 		nullb->nr_queues++;
 	}
 
-	if (i == submit_queues)
-		return 0;
-
+	return 0;
+err_queue:
 	cleanup_queues(nullb);
-	return 1;
+	return ret;
 }
 
 static int null_add_dev(void)
@@ -495,9 +507,6 @@ static int null_add_dev(void)
 
 	spin_lock_init(&nullb->lock);
 
-	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
-		submit_queues = nr_online_nodes;
-
 	if (setup_queues(nullb))
 		goto err;
 
@@ -518,11 +527,13 @@ static int null_add_dev(void)
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		blk_queue_make_request(nullb->q, null_queue_bio);
+		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		if (nullb->q)
 			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		init_driver_queues(nullb);
 	}
 
 	if (!nullb->q)
@@ -579,7 +590,9 @@ static int __init null_init(void)
 	}
 #endif
 
-	if (submit_queues > nr_cpu_ids)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
+		submit_queues = nr_online_nodes;
+	else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;
From d15ee6b1a43afbe1a6cece3bd8d336a9d5cb7060 Mon Sep 17 00:00:00 2001
From: Matias Bjorling
Date: Wed, 18 Dec 2013 13:41:44 +0100
Subject: [PATCH 17/20] null_blk: warning on ignored submit_queues param

Let the user know when the number of submission queues is being
ignored.

Signed-off-by: Matias Bjorling
Signed-off-by: Jens Axboe
---
 drivers/block/null_blk.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f0aeb2a7a9ca..8f2e7c330d6d 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -590,9 +590,12 @@ static int __init null_init(void)
 	}
 #endif
 
-	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+		if (submit_queues > 0)
+			pr_warn("null_blk: submit_queues param is set to %u.",
+				nr_online_nodes);
 		submit_queues = nr_online_nodes;
-	else if (submit_queues > nr_cpu_ids)
+	} else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;

From 89ed05eea093d4c18df5d504d104f29b874fea29 Mon Sep 17 00:00:00 2001
From: Matias Bjørling
Date: Sat, 21 Dec 2013 00:10:59 +0100
Subject: [PATCH 18/20] null_blk: corrections to documentation

Randy Dunlap reported a couple of grammar errors and unfortunate usages
of socket/node/core.

Signed-off-by: Matias Bjorling
Acked-by: Randy Dunlap
Signed-off-by: Jens Axboe
---
 Documentation/block/null_blk.txt | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 9e1b047fd13d..5603dad5534b 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -18,7 +18,7 @@ The following instances are possible:
   - Bio-based. IO requests are submitted directly to the device driver.
   - Directly accepts bio data structure and returns them.
 
-All of them has a completion queue for each core in the system.
+All of them have a completion queue for each core in the system.
 
 II. Module parameters applicable for all instances:
 
@@ -30,7 +30,7 @@ queue_mode=[0-2]: Default: 2-Multi-queue
   2: Multi-queue.
 
 home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
-  Selects what socket the data structures is allocated from.
+  Selects what CPU node the data structures are allocated from.
 
 gb=[Size in GB]: Default: 250GB
   The size of the device reported to the system.
@@ -38,34 +38,34 @@ gb=[Size in GB]: Default: 250GB
 bs=[Block size (in bytes)]: Default: 512 bytes
   The block size reported to the system.
 
-nr_devices=[Num. devices]: Default: 2
+nr_devices=[Number of devices]: Default: 2
   Number of block devices instantiated. They are instantiated as /dev/nullb0,
   etc.
 
-irq_mode=[0-2]: Default: Soft-irq
+irq_mode=[0-2]: Default: 1-Soft-irq
   The completion mode used for completing IOs to the block-layer.
 
   0: None.
-  1: Soft-irq. Uses ipi to complete IOs across sockets. Simulates the overhead
-     when IOs are issued from another socket than the home the device is
+  1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+     when IOs are issued from another CPU node than the home the device is
      connected to.
   2: Timer: Waits a specific period (completion_nsec) for each IO before
      completion.
 
-completion_nsec=[Num. ns]: Default: 10.000ns
+completion_nsec=[ns]: Default: 10.000ns
   Combined with irq_mode=2 (timer). The time each completion event must wait.
 
 submit_queues=[0..nr_cpus]:
   The number of submission queues attached to the device driver. If unset, it
   defaults to 1 on single-queue and bio-based instances. For multi-queue,
-  its ignored when use_per_node_hctx module parameter is 1.
+  it is ignored when use_per_node_hctx module parameter is 1.
 
-hw_queue_depth=[0..qdepth]: Defaults: 64
+hw_queue_depth=[0..qdepth]: Default: 64
   The hardware queue depth of the device.
 
 III: Multi-queue specific parameters
 
-use_per_node_hctx=[0/1]: Defaults: 1
+use_per_node_hctx=[0/1]: Default: 1
   If 1, the multi-queue block layer is instantiated with a hardware dispatch
   queue for each CPU node in the system. If 0, it is instantiated with the
   number of queues defined in the submit_queues parameter.
From 200052440d3b56f593038a35b7c14bdc780184a9 Mon Sep 17 00:00:00 2001
From: Matias Bjørling
Date: Sat, 21 Dec 2013 00:11:00 +0100
Subject: [PATCH 19/20] null_blk: set use_per_node_hctx param to false

The defaults for the module is to instantiate itself with blk-mq and a
submit queue for each CPU node in the system. To save resources,
initialize instead with a single submit queue.

Signed-off-by: Matias Bjorling
Signed-off-by: Jens Axboe
---
 Documentation/block/null_blk.txt | 9 +++++----
 drivers/block/null_blk.c         | 4 ++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 5603dad5534b..b2830b435895 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -65,7 +65,8 @@ hw_queue_depth=[0..qdepth]: Default: 64
 
 III: Multi-queue specific parameters
 
-use_per_node_hctx=[0/1]: Default: 1
-  If 1, the multi-queue block layer is instantiated with a hardware dispatch
-  queue for each CPU node in the system. If 0, it is instantiated with the
-  number of queues defined in the submit_queues parameter.
+use_per_node_hctx=[0/1]: Default: 0
+  0: The number of submit queues are set to the value of the submit_queues
+     parameter.
+  1: The multi-queue block layer is instantiated with a hardware dispatch
+     queue for each CPU node in the system.
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8f2e7c330d6d..9b0311b61fe1 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -101,9 +101,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
 
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
From fc1bc35443741e132dd0118e8dbac53f69a6f76e Mon Sep 17 00:00:00 2001
From: Matias Bjørling
Date: Sat, 21 Dec 2013 00:11:01 +0100
Subject: [PATCH 20/20] null_blk: support submit_queues on use_per_node_hctx

In the case where both the submit_queues param and the
use_per_node_hctx param are used, we limit the number of submit_queues
to the number of online nodes.

If submit_queues is a multiple of nr_online_nodes, it's trivial: simply
map them to the nodes. For example: 8 submit queues are mapped as
node0[0,1], node1[2,3], ...

If uneven, we are left with an uneven number of submit_queues that must
be mapped. These are mapped toward the first node and onward. E.g. 5
submit queues mapped onto 4 nodes are mapped as node0[0,1], node1[2],
...

Signed-off-by: Matias Bjorling
Signed-off-by: Jens Axboe
---
 drivers/block/null_blk.c | 39 +++++++++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 9b0311b61fe1..528f4e47f38e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg,
 					     unsigned int hctx_index)
 {
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-				hctx_index);
+	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+	int tip = (reg->nr_hw_queues % nr_online_nodes);
+	int node = 0, i, n;
+
+	/*
+	 * Split submit queues evenly wrt to the number of nodes. If uneven,
+	 * fill the first buckets with one extra, until the rest is filled with
+	 * no extra.
+	 */
+	for (i = 0, n = 1; i < hctx_index; i++, n++) {
+		if (n % b_size == 0) {
+			n = 0;
+			node++;
+
+			tip--;
+			if (!tip)
+				b_size = reg->nr_hw_queues / nr_online_nodes;
+		}
+	}
+
+	/*
+	 * A node might not be online, therefore map the relative node id to the
+	 * real node id.
+	 */
+	for_each_online_node(n) {
+		if (!node)
+			break;
+		node--;
+	}
+
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
 static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -591,10 +621,11 @@ static int __init null_init(void)
 #endif
 
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
-		if (submit_queues > 0)
+		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
 				nr_online_nodes);
-		submit_queues = nr_online_nodes;
+			submit_queues = nr_online_nodes;
+		}
 	} else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;
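Tracing the split for the uneven case in the commit message confirms the
bucket sizes: with 5 queues on 4 nodes, b_size starts at
DIV_ROUND_UP(5, 4) = 2 with tip = 1, so node 0 takes two queues and the
remaining nodes take one each. A standalone sketch of the same loop
(userspace C, hypothetical names, assuming all nodes are online so the
relative node id equals the real one):

	#include <stdio.h>

	static int index_to_node(int nr_hw_queues, int nr_nodes, int hctx_index)
	{
		/* DIV_ROUND_UP(nr_hw_queues, nr_nodes) */
		int b_size = (nr_hw_queues + nr_nodes - 1) / nr_nodes;
		int tip = nr_hw_queues % nr_nodes;
		int node = 0, i, n;

		/* first `tip` buckets hold one extra queue, the rest hold fewer */
		for (i = 0, n = 1; i < hctx_index; i++, n++) {
			if (n % b_size == 0) {
				n = 0;
				node++;

				tip--;
				if (!tip)
					b_size = nr_hw_queues / nr_nodes;
			}
		}
		return node;
	}

	int main(void)
	{
		/* prints: queues 0,1 -> node 0; 2 -> 1; 3 -> 2; 4 -> 3 */
		for (int i = 0; i < 5; i++)
			printf("queue %d -> node %d\n", i, index_to_node(5, 4, i));
		return 0;
	}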