From 1200e07f3ad4b9d976cf2fff3a0c3d9a1faecb3e Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Mon, 8 Apr 2019 19:02:38 +0800
Subject: [PATCH 01/13] block: don't use for-inside-for in
 bio_for_each_segment_all

Commit 6dc4f100c175 ("block: allow bio_for_each_segment_all() to
iterate over multi-page bvec") changed bio_for_each_segment_all() to
use a for-inside-for construct. This breaks every
bio_for_each_segment_all() caller that bails out of the loop via
'break', because 'break' now only exits the inner loop.

Fix this issue by implementing bio_for_each_segment_all() as a single
'for' loop, which also makes the logic very similar to the normal bvec
iterator.

Cc: Qu Wenruo
Cc: linux-btrfs@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org
Cc: Omar Sandoval
Reviewed-by: Johannes Thumshirn
Reported-and-Tested-by: Qu Wenruo
Fixes: 6dc4f100c175 ("block: allow bio_for_each_segment_all() to iterate over multi-page bvec")
Reviewed-by: Christoph Hellwig
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
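Note: the breakage is easy to demonstrate outside the kernel. A
compilable toy model of the two macro shapes (plain ints stand in for
bvecs; this illustrates the control flow only, not the kernel macros):

  #include <stdio.h>

  /* old shape: for-inside-for, as after commit 6dc4f100c175 */
  #define for_each_seg_nested(i)                          \
          for (int vec = 0; vec < 3; vec++)               \
                  for (i = vec * 2; i < vec * 2 + 2; i++)

  /* new shape: one loop driven by a helper, like bio_next_segment() */
  static int next_seg(int *i)
  {
          return ++*i < 6;
  }
  #define for_each_seg_flat(i)                            \
          for (i = -1; next_seg(&i); )

  int main(void)
  {
          int i;

          for_each_seg_nested(i) {
                  if (i == 2)
                          break;  /* only exits the inner loop... */
                  printf("nested: %d\n", i);  /* ...so 4 and 5 still print */
          }
          for_each_seg_flat(i) {
                  if (i == 2)
                          break;  /* exits the whole iteration */
                  printf("flat: %d\n", i);
          }
          return 0;
  }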
 include/linux/bio.h  | 20 ++++++++++++--------
 include/linux/bvec.h | 14 ++++++++++----
 2 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/include/linux/bio.h b/include/linux/bio.h
index bb6090aa165d..e584673c1881 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -120,19 +120,23 @@ static inline bool bio_full(struct bio *bio)
 	return bio->bi_vcnt >= bio->bi_max_vecs;
 }
 
-#define mp_bvec_for_each_segment(bv, bvl, i, iter_all)			\
-	for (bv = bvec_init_iter_all(&iter_all);			\
-		(iter_all.done < (bvl)->bv_len) &&			\
-		(mp_bvec_next_segment((bvl), &iter_all), 1);		\
-		iter_all.done += bv->bv_len, i += 1)
+static inline bool bio_next_segment(const struct bio *bio,
+				    struct bvec_iter_all *iter)
+{
+	if (iter->idx >= bio->bi_vcnt)
+		return false;
+
+	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+	return true;
+}
 
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
-#define bio_for_each_segment_all(bvl, bio, i, iter_all)		\
-	for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++)	\
-		mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)
+#define bio_for_each_segment_all(bvl, bio, i, iter)			\
+	for (i = 0, bvl = bvec_init_iter_all(&iter);			\
+	     bio_next_segment((bio), &iter); i++)
 
 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
 				    unsigned bytes)
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index f6275c4da13a..3bc91879e1e2 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -145,18 +145,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
 
 static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
 {
-	iter_all->bv.bv_page = NULL;
 	iter_all->done = 0;
+	iter_all->idx = 0;
 
 	return &iter_all->bv;
 }
 
-static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
-					struct bvec_iter_all *iter_all)
+static inline void bvec_advance(const struct bio_vec *bvec,
+				struct bvec_iter_all *iter_all)
 {
 	struct bio_vec *bv = &iter_all->bv;
 
-	if (bv->bv_page) {
+	if (iter_all->done) {
 		bv->bv_page = nth_page(bv->bv_page, 1);
 		bv->bv_offset = 0;
 	} else {
@@ -165,6 +165,12 @@ static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
 	}
 	bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
 			   bvec->bv_len - iter_all->done);
+	iter_all->done += bv->bv_len;
+
+	if (iter_all->done == bvec->bv_len) {
+		iter_all->idx++;
+		iter_all->done = 0;
+	}
 }
 
 /*

From 704236672edacf353c362bab70c3d3eda7bb4a51 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 8 Apr 2019 10:48:50 -0600
Subject: [PATCH 02/13] tools/io_uring: remove IOCQE_FLAG_CACHEHIT

This ended up not being included in the mainline version of io_uring,
so drop it from the test app as well.

Signed-off-by: Jens Axboe
---
 tools/io_uring/io_uring-bench.c | 32 ++++----------------------------
 1 file changed, 4 insertions(+), 28 deletions(-)

diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
index 512306a37531..0f257139b003 100644
--- a/tools/io_uring/io_uring-bench.c
+++ b/tools/io_uring/io_uring-bench.c
@@ -32,10 +32,6 @@
 #include "liburing.h"
 #include "barrier.h"
 
-#ifndef IOCQE_FLAG_CACHEHIT
-#define IOCQE_FLAG_CACHEHIT	(1U << 0)
-#endif
-
 #define min(a, b)		((a < b) ? (a) : (b))
 
 struct io_sq_ring {
@@ -85,7 +81,6 @@ struct submitter {
 	unsigned long reaps;
 	unsigned long done;
 	unsigned long calls;
-	unsigned long cachehit, cachemiss;
 	volatile int finish;
 
 	__s32 *fds;
@@ -270,10 +265,6 @@ static int reap_events(struct submitter *s)
 				return -1;
 			}
 		}
-		if (cqe->flags & IOCQE_FLAG_CACHEHIT)
-			s->cachehit++;
-		else
-			s->cachemiss++;
 		reaped++;
 		head++;
 	} while (1);
@@ -489,7 +480,7 @@ static void file_depths(char *buf)
 int main(int argc, char *argv[])
 {
 	struct submitter *s = &submitters[0];
-	unsigned long done, calls, reap, cache_hit, cache_miss;
+	unsigned long done, calls, reap;
 	int err, i, flags, fd;
 	char *fdepths;
 	void *ret;
@@ -569,44 +560,29 @@ int main(int argc, char *argv[])
 	pthread_create(&s->thread, NULL, submitter_fn, s);
 
 	fdepths = malloc(8 * s->nr_files);
-	cache_hit = cache_miss = reap = calls = done = 0;
+	reap = calls = done = 0;
 	do {
 		unsigned long this_done = 0;
 		unsigned long this_reap = 0;
 		unsigned long this_call = 0;
-		unsigned long this_cache_hit = 0;
-		unsigned long this_cache_miss = 0;
 		unsigned long rpc = 0, ipc = 0;
-		double hit = 0.0;
 
 		sleep(1);
 		this_done += s->done;
 		this_call += s->calls;
		this_reap += s->reaps;
-		this_cache_hit += s->cachehit;
-		this_cache_miss += s->cachemiss;
-		if (this_cache_hit && this_cache_miss) {
-			unsigned long hits, total;
-
-			hits = this_cache_hit - cache_hit;
-			total = hits + this_cache_miss - cache_miss;
-			hit = (double) hits / (double) total;
-			hit *= 100.0;
-		}
 		if (this_call - calls) {
 			rpc = (this_done - done) / (this_call - calls);
 			ipc = (this_reap - reap) / (this_call - calls);
 		} else
 			rpc = ipc = -1;
 		file_depths(fdepths);
-		printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
+		printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n",
 				this_done - done, rpc, ipc, s->inflight,
-				fdepths, hit);
+				fdepths);
 		done = this_done;
 		calls = this_call;
 		reap = this_reap;
-		cache_hit = s->cachehit;
-		cache_miss = s->cachemiss;
 	} while (!finish);
 
 	pthread_join(s->thread, &ret);

From 3ec482d15cb986bf08b923f9193eeddb3b9ca69f Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 8 Apr 2019 10:51:01 -0600
Subject: [PATCH 03/13] io_uring: restrict IORING_SETUP_SQPOLL to root

This option spawns a kernel-side thread that will poll for submissions
(and completions, if IORING_SETUP_IOPOLL is set). As this allows a user
to potentially use more cycles outside of the normal hierarchy,
restrict the use of this feature to root.

Signed-off-by: Jens Axboe
---
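Note: from userspace the new check surfaces as EPERM at ring setup
time. A minimal sketch of how an unprivileged process would observe it,
assuming headers new enough to provide __NR_io_uring_setup and
<linux/io_uring.h>:

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/io_uring.h>

  int main(void)
  {
          struct io_uring_params p;
          int fd;

          memset(&p, 0, sizeof(p));
          p.flags = IORING_SETUP_SQPOLL;  /* ask for the kernel-side poller */

          fd = syscall(__NR_io_uring_setup, 8, &p);
          if (fd < 0 && errno == EPERM)
                  printf("SQPOLL requires CAP_SYS_ADMIN on this kernel\n");
          else if (fd >= 0)
                  printf("SQPOLL ring created (running privileged)\n");
          return 0;
  }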
 fs/io_uring.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 07d6ef195d05..89aa8412b5f5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2245,6 +2245,10 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 		goto err;
 
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		ret = -EPERM;
+		if (!capable(CAP_SYS_ADMIN))
+			goto err;
+
 		if (p->flags & IORING_SETUP_SQ_AFF) {
 			int cpu;

From eed47d19d9362bdd958e4ab56af480b9dbf6b2b6 Mon Sep 17 00:00:00 2001
From: Paolo Valente
Date: Wed, 10 Apr 2019 10:38:33 +0200
Subject: [PATCH 04/13] block, bfq: fix use after free in bfq_bfqq_expire

The function bfq_bfqq_expire() invokes the function
__bfq_bfqq_expire(), and the latter may free the in-service bfq-queue.
If this happens, then no other instruction of bfq_bfqq_expire() must be
executed, or a use-after-free will occur.

Based on the assumption that __bfq_bfqq_expire() invokes
bfq_put_queue() on the in-service bfq-queue exactly once, the queue is
assumed to be freed if its refcounter is equal to one right before
invoking __bfq_bfqq_expire().

But this assumption is false since commit 9dee8b3b057e ("block, bfq:
fix queue removal from weights tree"): __bfq_bfqq_expire() may also
invoke bfq_weights_tree_remove(), and since that commit the latter
function may itself invoke bfq_put_queue(). So __bfq_bfqq_expire() may
invoke bfq_put_queue() twice, and this is the actual case in which the
in-service queue may happen to be freed.

To address this issue, this commit moves the check on the refcounter of
the queue right around the last bfq_put_queue() that may be invoked on
the queue.

Fixes: 9dee8b3b057e ("block, bfq: fix queue removal from weights tree")
Reported-by: Dmitrii Tcvetkov
Reported-by: Douglas Anderson
Tested-by: Dmitrii Tcvetkov
Tested-by: Douglas Anderson
Signed-off-by: Paolo Valente
Signed-off-by: Jens Axboe
---
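Note: the fix follows a general refcounting rule: sample the refcount
immediately next to the put that may be the last one, and report
"freed" from there, instead of letting a caller act on a snapshot taken
before other puts could run. A generic, compilable sketch of the
pattern (toy object, not the bfq code):

  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct obj {
          int ref;
  };

  static void obj_put(struct obj *o)
  {
          if (--o->ref == 0)
                  free(o);
  }

  /* returns true if o was freed; the caller must not touch it again */
  static bool obj_put_maybe_last(struct obj *o)
  {
          int ref = o->ref;       /* read right before the put... */

          obj_put(o);
          return ref == 1;        /* ...so earlier puts can't stale it */
  }

  int main(void)
  {
          struct obj *o = malloc(sizeof(*o));

          o->ref = 2;
          obj_put(o);                     /* models the extra put done via
                                             bfq_weights_tree_remove() */
          if (obj_put_maybe_last(o))      /* still detected correctly */
                  printf("freed, no further use of o\n");
          return 0;
  }

The old code took the snapshot in bfq_bfqq_expire(), several calls away
from the puts; once two puts became possible, that snapshot went stale.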
 block/bfq-iosched.c | 15 +++++++--------
 block/bfq-iosched.h |  2 +-
 block/bfq-wf2q.c    | 17 +++++++++++++++--
 3 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index fac188dd78fa..dfb8cb0af13a 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2822,7 +2822,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
 	bfq_remove_request(q, rq);
 }
 
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
 	/*
 	 * If this bfqq is shared between multiple processes, check
@@ -2855,9 +2855,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	/*
 	 * All in-service entities must have been properly deactivated
 	 * or requeued before executing the next function, which
-	 * resets all in-service entites as no more in service.
+	 * resets all in-service entities as no more in service. This
+	 * may cause bfqq to be freed. If this happens, the next
+	 * function returns true.
 	 */
-	__bfq_bfqd_reset_in_service(bfqd);
+	return __bfq_bfqd_reset_in_service(bfqd);
 }
 
 /**
@@ -3262,7 +3264,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
 	bool slow;
 	unsigned long delta = 0;
 	struct bfq_entity *entity = &bfqq->entity;
-	int ref;
 
 	/*
 	 * Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3347,10 +3348,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
 	 * reason.
 	 */
 	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
-	ref = bfqq->ref;
-	__bfq_bfqq_expire(bfqd, bfqq);
-
-	if (ref == 1) /* bfqq is gone, no more actions on it */
+	if (__bfq_bfqq_expire(bfqd, bfqq))
+		/* bfqq is gone, no more actions on it */
 		return;
 
 	bfqq->injected_service = 0;
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 062e1c4787f4..86394e503ca9 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -995,7 +995,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
 			     bool ins_into_idle_tree);
 bool next_queue_may_preempt(struct bfq_data *bfqd);
 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 			 bool ins_into_idle_tree, bool expiration);
 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index a11bef75483d..ae4d000ac0af 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1605,7 +1605,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 	return bfqq;
 }
 
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 {
 	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
 	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1629,8 +1630,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 	 * service tree either, then release the service reference to
 	 * the queue it represents (taken with bfq_get_entity).
 	 */
-	if (!in_serv_entity->on_st)
+	if (!in_serv_entity->on_st) {
+		/*
+		 * If no process is referencing in_serv_bfqq any
+		 * longer, then the service reference may be the only
+		 * reference to the queue. If this is the case, then
+		 * bfqq gets freed here.
+		 */
+		int ref = in_serv_bfqq->ref;
 		bfq_put_queue(in_serv_bfqq);
+		if (ref == 1)
+			return true;
+	}
+
+	return false;
 }
 
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,

From bf348f9b78d413e75bb079462751a1d86b6de36c Mon Sep 17 00:00:00 2001
From: Dongli Zhang
Date: Wed, 27 Mar 2019 18:36:34 +0800
Subject: [PATCH 05/13] virtio-blk: limit number of hw queues by nr_cpu_ids

When tag_set->nr_maps is 1, the block layer limits the number of hw
queues by nr_cpu_ids. Since virtio-blk has tag_set->nr_maps == 1, it
can use at most nr_cpu_ids hw queues no matter how many are configured.

In addition, specifically in the pci scenario, when the 'num-queues'
specified by qemu is more than maxcpus, virtio-blk would not be able
to allocate more than maxcpus vectors in order to have a vector for
each queue. As a result, it falls back to MSI-X with one vector for
config and one shared for queues.

Considering the above reasons, this patch limits the number of hw
queues used by virtio-blk to nr_cpu_ids.

Reviewed-by: Stefan Hajnoczi
Signed-off-by: Dongli Zhang
Signed-off-by: Jens Axboe
---
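Note: the whole fix is the min_t() clamp in the hunk below; its effect
is easy to model in plain C with hypothetical numbers (a 4-vcpu guest
handed 8 queues by qemu; min_t() reduced to a plain ternary):

  #include <stdio.h>

  #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

  int main(void)
  {
          unsigned int nr_cpu_ids = 4;  /* e.g. maxcpus=4 in the guest */
          unsigned int num_vqs = 8;     /* e.g. -device virtio-blk,num-queues=8 */

          /* extra vqs buy nothing with tag_set->nr_maps == 1, so clamp */
          num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
          printf("using %u virtqueues\n", num_vqs);  /* prints 4 */
          return 0;
  }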
 drivers/block/virtio_blk.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4bc083b7c9b5..2a7ca4a1e6f7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
 	if (err)
 		num_vqs = 1;
 
+	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
 		return -ENOMEM;

From 1978f30a87732d4d9072a20abeded9fe17884f1b Mon Sep 17 00:00:00 2001
From: Dongli Zhang
Date: Wed, 27 Mar 2019 18:36:35 +0800
Subject: [PATCH 06/13] scsi: virtio_scsi: limit number of hw queues by
 nr_cpu_ids

When tag_set->nr_maps is 1, the block layer limits the number of hw
queues by nr_cpu_ids. Since virtio-scsi has tag_set->nr_maps == 1, it
can use at most nr_cpu_ids hw queues no matter how many are configured.

In addition, specifically in the pci scenario, when the 'num_queues'
specified by qemu is more than maxcpus, virtio-scsi would not be able
to allocate more than maxcpus vectors in order to have a vector for
each queue. As a result, it falls back to MSI-X with one vector for
config and one shared for queues.

Considering the above reasons, this patch limits the number of hw
queues used by virtio-scsi to nr_cpu_ids.

Reviewed-by: Stefan Hajnoczi
Signed-off-by: Dongli Zhang
Signed-off-by: Jens Axboe
---
 drivers/scsi/virtio_scsi.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8af01777d09c..f8cb7c23305b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
 	num_targets = virtscsi_config_get(vdev, max_target) + 1;

From 1b8f21b74c3c9c82fce5a751d7aefb7cc0b8d33d Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 9 Apr 2019 06:31:21 +0800
Subject: [PATCH 07/13] blk-mq: introduce blk_mq_complete_request_sync()

In NVMe's error handler, the typical steps for tearing down the
hardware when recovering the controller are:

1) stop the blk_mq hw queues
2) stop the real hw queues
3) cancel in-flight requests via
	blk_mq_tagset_busy_iter(tags, cancel_request, ...)
   where cancel_request() marks the request as aborted and calls
	blk_mq_complete_request(req);
4) destroy the real hw queues

However, there may be a race between #3 and #4, because
blk_mq_complete_request() may run q->mq_ops->complete(rq) remotely and
asynchronously, so ->complete(rq) may run after #4.

This patch introduces blk_mq_complete_request_sync() for fixing the
above race.

Cc: Sagi Grimberg
Cc: Bart Van Assche
Cc: James Smart
Cc: linux-nvme@lists.infradead.org
Reviewed-by: Keith Busch
Reviewed-by: Christoph Hellwig
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
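Note: in driver terms, the ordering the new helper makes safe looks
roughly like the sketch below. Only the blk-mq calls, nvme_req() and
NVME_SC_ABORT_REQ are real interfaces; my_ctrl and the stop/destroy
helpers are placeholders for transport-specific code, so treat this as
a sketch, not a driver implementation:

  static bool cancel_request(struct request *rq, void *data, bool reserved)
  {
          nvme_req(rq)->status = NVME_SC_ABORT_REQ;
          /*
           * blk_mq_complete_request() may punt ->complete(rq) to another
           * CPU via IPI/softirq, so it could still be running after the
           * hw queues below are destroyed. The _sync variant invokes
           * ->complete(rq) before returning, closing that window.
           */
          blk_mq_complete_request_sync(rq);
          return true;
  }

  static void error_recovery_teardown(struct my_ctrl *ctrl)
  {
          blk_mq_stop_hw_queues(ctrl->queue);              /* step 1 */
          my_stop_real_hw_queues(ctrl);                    /* step 2 (placeholder) */
          blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                  cancel_request, ctrl);   /* step 3 */
          my_destroy_real_hw_queues(ctrl);                 /* step 4: now safe */
  }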
 block/blk-mq.c         | 7 +++++++
 include/linux/blk-mq.h | 1 +
 2 files changed, 8 insertions(+)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index a9354835cf51..9516304a38ee 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -654,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+void blk_mq_complete_request_sync(struct request *rq)
+{
+	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+	rq->q->mq_ops->complete(rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
+
 int blk_mq_request_started(struct request *rq)
 {
 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index cb2aa7ecafff..db29928de467 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -302,6 +302,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request_sync(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 			   struct bio *bio);
 bool blk_mq_queue_stopped(struct request_queue *q);

From eb3afb75b57c28599af0dfa03a99579d410749e9 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 9 Apr 2019 06:31:22 +0800
Subject: [PATCH 08/13] nvme: cancel request synchronously

nvme_cancel_request() is used in the error handler, and it is always
safe to cancel a request synchronously; doing so avoids a possible
race in which a request is completed after the real hw queue has been
destroyed.

One such issue was reported by our customer on NVMe RDMA, in which a
freed ib queue pair may be used in nvme_rdma_complete_rq().

Cc: Sagi Grimberg
Cc: Bart Van Assche
Cc: James Smart
Cc: linux-nvme@lists.infradead.org
Reviewed-by: Keith Busch
Reviewed-by: Christoph Hellwig
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 470601980794..2c43e12b70af 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 				"Cancelling I/O %d", req->tag);
 
 	nvme_req(req)->status = NVME_SC_ABORT_REQ;
-	blk_mq_complete_request(req);
+	blk_mq_complete_request_sync(req);
 	return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);

From b2b3a70cd9984fe39ed5aaa9ce596476051ce5de Mon Sep 17 00:00:00 2001
From: Hans Holmberg
Date: Wed, 10 Apr 2019 19:56:43 +0200
Subject: [PATCH 09/13] lightnvm: pblk: fix crash in pblk_end_partial_read due
 to multipage bvecs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The introduction of multipage bio vectors broke pblk's partial read
logic, which was not prepared for them. Use bio vector iterators
instead of direct bio vector indexing.

Fixes: 07173c3ec276 ("block: enable multipage bvecs")
Reported-by: Klaus Jensen
Signed-off-by: Hans Holmberg
Updated description.
Signed-off-by: Matias Bjørling
Signed-off-by: Jens Axboe
---
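Note: direct indexing broke because one multipage bvec can cover
several pages, so bi_io_vec[hole] no longer addresses the hole'th page
of data. A compilable toy model of the iterator-style walk the patch
switches to (plain structs standing in for bios; not kernel code):

  #include <stdio.h>

  #define PAGE_SIZE 4096u

  struct vec {
          unsigned int len;       /* one, possibly multipage, bvec */
  };

  int main(void)
  {
          /* 4 pages of data held in only 2 vectors */
          struct vec bio[2] = { { 3 * PAGE_SIZE }, { PAGE_SIZE } };
          unsigned int idx = 0, done = 0, page = 0;

          /* advance a byte cursor by PAGE_SIZE per step, the way
           * bio_iter_iovec()/bio_advance_iter() are used in the patch */
          while (idx < 2) {
                  printf("page %u is bvec %u at offset %u\n",
                         page++, idx, done);
                  done += PAGE_SIZE;
                  if (done == bio[idx].len) {
                          idx++;
                          done = 0;
                  }
          }
          /* direct bi_io_vec[i] indexing would have assumed 2 pages */
          return 0;
  }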
 drivers/lightnvm/pblk-read.c | 50 ++++++++++++++++++++----------------
 1 file changed, 28 insertions(+), 22 deletions(-)

diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 3789185144da..0b7d5fb4548d 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 	struct pblk_sec_meta *meta;
 	struct bio *new_bio = rqd->bio;
 	struct bio *bio = pr_ctx->orig_bio;
-	struct bio_vec src_bv, dst_bv;
 	void *meta_list = rqd->meta_list;
-	int bio_init_idx = pr_ctx->bio_init_idx;
 	unsigned long *read_bitmap = pr_ctx->bitmap;
+	struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+	struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
 	int nr_secs = pr_ctx->orig_nr_secs;
 	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
 	void *src_p, *dst_p;
-	int hole, i;
+	int bit, i;
 
 	if (unlikely(nr_holes == 1)) {
 		struct ppa_addr ppa;
@@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 
 	/* Fill the holes in the original bio */
 	i = 0;
-	hole = find_first_zero_bit(read_bitmap, nr_secs);
-	do {
-		struct pblk_line *line;
+	for (bit = 0; bit < nr_secs; bit++) {
+		if (!test_bit(bit, read_bitmap)) {
+			struct bio_vec dst_bv, src_bv;
+			struct pblk_line *line;
 
-		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
-		kref_put(&line->ref, pblk_line_put);
+			line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+			kref_put(&line->ref, pblk_line_put);
 
-		meta = pblk_get_meta(pblk, meta_list, hole);
-		meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+			meta = pblk_get_meta(pblk, meta_list, bit);
+			meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
 
-		src_bv = new_bio->bi_io_vec[i++];
-		dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+			dst_bv = bio_iter_iovec(bio, orig_iter);
+			src_bv = bio_iter_iovec(new_bio, new_iter);
 
-		src_p = kmap_atomic(src_bv.bv_page);
-		dst_p = kmap_atomic(dst_bv.bv_page);
+			src_p = kmap_atomic(src_bv.bv_page);
+			dst_p = kmap_atomic(dst_bv.bv_page);
 
-		memcpy(dst_p + dst_bv.bv_offset,
-			src_p + src_bv.bv_offset,
-			PBLK_EXPOSED_PAGE_SIZE);
+			memcpy(dst_p + dst_bv.bv_offset,
+				src_p + src_bv.bv_offset,
+				PBLK_EXPOSED_PAGE_SIZE);
 
-		kunmap_atomic(src_p);
-		kunmap_atomic(dst_p);
+			kunmap_atomic(src_p);
+			kunmap_atomic(dst_p);
 
-		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+			flush_dcache_page(dst_bv.bv_page);
+			mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
 
-		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
-	} while (hole < nr_secs);
+			bio_advance_iter(new_bio, &new_iter,
+					PBLK_EXPOSED_PAGE_SIZE);
+			i++;
+		}
+		bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+	}
 
 	bio_put(new_bio);
 	kfree(pr_ctx);

From a3761c3c91209b58b6f33bf69dd8bb8ec0c9d925 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?=
Date: Wed, 10 Apr 2019 16:27:51 -0400
Subject: [PATCH 10/13] block: do not leak memory in bio_copy_user_iov()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When bio_add_pc_page() fails in bio_copy_user_iov() we should free
the page we just allocated; otherwise we are leaking it.

Cc: linux-block@vger.kernel.org
Cc: Linus Torvalds
Cc: stable@vger.kernel.org
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Jérôme Glisse
Signed-off-by: Jens Axboe
---
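Note: the ownership rule behind the one-line fix, as a compilable toy:
on the failing branch, free the page only if we allocated it ourselves;
pages supplied by the caller via map_data remain the caller's to free.
add_page() is a stub standing in for bio_add_pc_page():

  #include <stdbool.h>
  #include <stdlib.h>

  static void *added[16];
  static int nadded;

  /* stand-in for bio_add_pc_page(): refuses pages once limits are hit */
  static bool add_page(void *page)
  {
          if (nadded == 4)
                  return false;
          added[nadded++] = page;
          return true;
  }

  static int copy_pages(void *map_data, int nr)
  {
          for (int i = 0; i < nr; i++) {
                  void *page = map_data ? map_data : malloc(4096);

                  if (!page)
                          return -1;
                  if (!add_page(page)) {
                          /* the fix: this page was never added anywhere,
                           * so if we allocated it, free it right here */
                          if (!map_data)
                                  free(page);
                          return -1;
                  }
          }
          return 0;
  }

  int main(void)
  {
          copy_pages(NULL, 8);            /* fails at page 4, leaks nothing */
          while (nadded)
                  free(added[--nadded]);  /* caller-side unwind of added pages */
          return 0;
  }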
 block/bio.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/block/bio.c b/block/bio.c
index b64cedc7f87c..716510ecd7ff 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1298,8 +1298,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 			}
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+			if (!map_data)
+				__free_page(page);
 			break;
+		}
 
 		len -= bytes;
 		offset = 0;

From 67f471b6ed3b09033c4ac77ea03f92afdb1989fe Mon Sep 17 00:00:00 2001
From: James Smart
Date: Mon, 8 Apr 2019 11:15:19 -0700
Subject: [PATCH 11/13] nvme-fc: correct csn initialization and increments on
 error

This patch fixes a long-standing bug that initialized the FC-NVME cmnd
iu CSN value to 1. Early FC-NVME specs had the connection starting with
CSN=1. By the time the spec reached approval, the language had changed
to state a connection should start with CSN=0. This patch corrects the
initialization value for FC-NVME connections.

Additionally, in reviewing the transport, the CSN value is assigned to
the new IU early in the start routine. It's possible that a later dma
map request may fail, causing the command to never be sent to the
controller. Change the location of the assignment so that it is
immediately prior to calling the lldd. Add a comment block to explain
the impacts if the lldd were to additionally fail sending the command.

Signed-off-by: Dick Kennedy
Signed-off-by: James Smart
Reviewed-by: Ewan D. Milne
Signed-off-by: Christoph Hellwig
---
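Note: the relocation of the assignment is the subtle part. A compilable
toy model of why taking the sequence number immediately before handing
the command to the lldd (rather than early in the start routine) avoids
burning a CSN when setup fails (plain C, not the transport code):

  #include <stdbool.h>
  #include <stdio.h>

  static unsigned int csn;        /* toy per-connection sequence counter */

  static bool setup_cmd(int i)    /* may fail before anything is sent,
                                     e.g. a dma map request failing */
  {
          return i != 1;          /* pretend command 1 fails setup */
  }

  static void send_cmd(int i)
  {
          /* new order: the counter only advances for commands actually
           * handed to the transport, so no CSN gap appears */
          printf("cmd %d sent with csn %u\n", i, ++csn);
  }

  int main(void)
  {
          for (int i = 0; i < 3; i++) {
                  /* old order would have done ++csn here, wasting one
                   * whenever setup_cmd() failed */
                  if (!setup_cmd(i))
                          continue;
                  send_cmd(i);
          }
          return 0;       /* prints csn 1 and 2: consecutive */
  }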
 drivers/nvme/host/fc.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f3b9d91ba0df..6d8451356eac 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
 	memset(queue, 0, sizeof(*queue));
 	queue->ctrl = ctrl;
 	queue->qnum = idx;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 	queue->dev = ctrl->dev;
 
 	if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	 */
 
 	queue->connection_id = 0;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 }
 
 static void
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 {
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
-	u32 csn;
 	int ret, opstate;
 
 	/*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 
 	/* format the FC-NVME CMD IU and fcp_req */
 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
-	csn = atomic_inc_return(&queue->csn);
-	cmdiu->csn = cpu_to_be32(csn);
 	cmdiu->data_len = cpu_to_be32(data_len);
 	switch (io_dir) {
 	case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	if (!(op->flags & FCOP_FLAGS_AEN))
 		blk_mq_start_request(op->rq);
 
+	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
+		/*
+		 * If the lld fails to send the command, is there an issue
+		 * with the csn value? If the command that fails is the
+		 * Connect, no - as the connection won't be live. If it is
+		 * a command post-connect, it's possible a gap in csn may
+		 * be created. Does this matter? As Linux initiators don't
+		 * send fused commands, no. The gap would exist, but as
+		 * there's nothing that depends on csn order to be delivered
+		 * on the target side, it shouldn't hurt. It would be
+		 * difficult for a target to even detect the csn gap as it
+		 * has no idea when the cmd with the csn was supposed to
+		 * arrive.
+		 */
 		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

From d808b7f759b50acf0784ce6230ffa63e12ef465d Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Tue, 9 Apr 2019 10:03:59 -0600
Subject: [PATCH 12/13] nvmet: fix discover log page when offsets are used

The nvme target hadn't been taking the Get Log Page offset parameter
into consideration, and so was returning corrupted log pages when
offsets are used. Since many tools, including nvme-cli, split the log
request into 4k chunks, we've been breaking discovery log responses
when more than 3 subsystems exist.

Fix the returned data by internally generating the entire discovery
log page and copying only the requested bytes into the user buffer.
The command log page offset type has been modified to a native __le64
to make it easier to extract the value from a command.

Signed-off-by: Keith Busch
Tested-by: Minwoo Im
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Hannes Reinecke
Reviewed-by: James Smart
Signed-off-by: Christoph Hellwig
---
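Note: the shape of the fix, modeled in plain C: generate the complete
log page once, then serve any dword-aligned (offset, length) window out
of that single snapshot, so tools that split a read into 4k chunks see
consistent bytes (toy buffers and entry size, not the nvmet code):

  #include <stdio.h>
  #include <string.h>

  #define ENTRY 32                        /* toy discovery entry size */

  static char log_page[4 * ENTRY];        /* the full, pre-built log */

  static int get_log(char *dst, size_t off, size_t len)
  {
          if (off & 0x3)                  /* spec: dword-aligned offsets */
                  return -1;
          if (off + len > sizeof(log_page))
                  len = sizeof(log_page) - off;
          memcpy(dst, log_page + off, len);  /* window into one snapshot */
          return 0;
  }

  int main(void)
  {
          char buf[2 * ENTRY];

          for (int i = 0; i < 4; i++)     /* four subsystems' entries */
                  memset(log_page + i * ENTRY, 'A' + i, ENTRY);

          get_log(buf, 0, sizeof(buf));           /* first half */
          get_log(buf, 2 * ENTRY, sizeof(buf));   /* second half, offset honoured */
          printf("second window starts with %c\n", buf[0]);  /* 'C' */
          return 0;
  }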
 drivers/nvme/target/admin-cmd.c |  5 +++
 drivers/nvme/target/discovery.c | 66 ++++++++++++++++++++++-----------
 drivers/nvme/target/nvmet.h     |  1 +
 include/linux/nvme.h            |  9 ++++-
 4 files changed, 57 insertions(+), 24 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 76250181fee0..9f72d515fc4b 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
 	return len;
 }
 
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+	return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
 	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c872b47a88f3..33ed95e72d6b 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
 		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_subsys_link *p;
+	struct nvmet_port *r;
+	size_t entries = 0;
+
+	list_for_each_entry(p, &req->port->subsystems, entry) {
+		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+			continue;
+		entries++;
+	}
+	list_for_each_entry(r, &req->port->referrals, entry)
+		entries++;
+	return entries;
+}
+
 static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 {
 	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_disc_rsp_page_hdr *hdr;
+	u64 offset = nvmet_get_log_page_offset(req->cmd);
 	size_t data_len = nvmet_get_log_page_len(req->cmd);
-	size_t alloc_len = max(data_len, sizeof(*hdr));
-	int residual_len = data_len - sizeof(*hdr);
+	size_t alloc_len;
 	struct nvmet_subsys_link *p;
 	struct nvmet_port *r;
 	u32 numrec = 0;
 	u16 status = 0;
+	void *buffer;
+
+	/* Spec requires dword aligned offsets */
+	if (offset & 0x3) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
 
 	/*
 	 * Make sure we're passing at least a buffer of response header size.
 	 * If host provided data len is less than the header size, only the
 	 * number of bytes requested by host will be sent to host.
 	 */
-	hdr = kzalloc(alloc_len, GFP_KERNEL);
-	if (!hdr) {
+	down_read(&nvmet_config_sem);
+	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+	buffer = kzalloc(alloc_len, GFP_KERNEL);
+	if (!buffer) {
+		up_read(&nvmet_config_sem);
 		status = NVME_SC_INTERNAL;
 		goto out;
 	}
 
-	down_read(&nvmet_config_sem);
+	hdr = buffer;
 	list_for_each_entry(p, &req->port->subsystems, entry) {
+		char traddr[NVMF_TRADDR_SIZE];
+
 		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
 			continue;
-		if (residual_len >= entry_size) {
-			char traddr[NVMF_TRADDR_SIZE];
 
-			nvmet_set_disc_traddr(req, req->port, traddr);
-			nvmet_format_discovery_entry(hdr, req->port,
-					p->subsys->subsysnqn, traddr,
-					NVME_NQN_NVME, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_set_disc_traddr(req, req->port, traddr);
+		nvmet_format_discovery_entry(hdr, req->port,
+				p->subsys->subsysnqn, traddr,
+				NVME_NQN_NVME, numrec);
 		numrec++;
 	}
 
 	list_for_each_entry(r, &req->port->referrals, entry) {
-		if (residual_len >= entry_size) {
-			nvmet_format_discovery_entry(hdr, r,
-					NVME_DISC_SUBSYS_NAME,
-					r->disc_addr.traddr,
-					NVME_NQN_DISC, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_format_discovery_entry(hdr, r,
+				NVME_DISC_SUBSYS_NAME,
+				r->disc_addr.traddr,
+				NVME_NQN_DISC, numrec);
 		numrec++;
 	}
 
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 
 	up_read(&nvmet_config_sem);
 
-	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-	kfree(hdr);
+	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+	kfree(buffer);
 out:
 	nvmet_req_complete(req, status);
 }
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51e49efd7849..1653d19b187f 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
 
 extern struct list_head *nvmet_ports;
 void nvmet_port_disc_changed(struct nvmet_port *port,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index baa49e6a23cc..c40720cb59ac 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
 	__le16			numdl;
 	__le16			numdu;
 	__u16			rsvd11;
-	__le32			lpol;
-	__le32			lpou;
+	union {
+		struct {
+			__le32 lpol;
+			__le32 lpou;
+		};
+		__le64 lpo;
+	};
 	__u32			rsvd14[2];
 };

From a89afe58f1a74aac768a5eb77af95ef4ee15beaa Mon Sep 17 00:00:00 2001
From: Jason Yan
Date: Fri, 12 Apr 2019 10:09:16 +0800
Subject: [PATCH 13/13] block: fix the return errno for direct IO

If the last bio to complete is not dio->bio, its error status will not
be assigned to dio->bio, and the status of the whole IO ends up wrong:

ksoftirqd/21-117  [021] ..s.  4017.966090:  8,0  C   N 4883648 [0]
        -0        [018] ..s.  4017.970888:  8,0  C  WS 4924800 + 1024 [0]
        -0        [018] ..s.  4017.970909:  8,0  D  WS 4935424 + 1024 []
        -0        [018] ..s.  4017.970924:  8,0  D  WS 4936448 + 321 []
ksoftirqd/21-117  [021] ..s.  4017.995033:  8,0  C   R 4883648 + 336 [65475]
ksoftirqd/21-117  [021] d.s.  4018.001988: myprobe1: (blkdev_bio_end_io+0x0/0x168) bi_status=7
ksoftirqd/21-117  [021] d.s.  4018.001992: myprobe: (aio_complete_rw+0x0/0x148) x0=0xffff802f2595ad80 res=0x12a000 res2=0x0

We always have to assign bio->bi_status to dio->bio.bi_status because
we only check dio->bio.bi_status when we return the whole IO to the
upper layer.

Fixes: 542ff7bf18c6 ("block: new direct I/O implementation")
Cc: stable@vger.kernel.org
Cc: Christoph Hellwig
Cc: Jens Axboe
Reviewed-by: Ming Lei
Signed-off-by: Jason Yan
Signed-off-by: Jens Axboe
---
 fs/block_dev.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/fs/block_dev.c b/fs/block_dev.c
index 78d3257435c0..24615c76c1d0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -307,10 +307,10 @@ static void blkdev_bio_end_io(struct bio *bio)
 	struct blkdev_dio *dio = bio->bi_private;
 	bool should_dirty = dio->should_dirty;
 
-	if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
-		if (bio->bi_status && !dio->bio.bi_status)
-			dio->bio.bi_status = bio->bi_status;
-	} else {
+	if (bio->bi_status && !dio->bio.bi_status)
+		dio->bio.bi_status = bio->bi_status;
+
+	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
 		if (!dio->is_sync) {
 			struct kiocb *iocb = dio->iocb;
 			ssize_t ret;
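Note: the logic change above, reduced to a compilable toy: record the
first error from every child bio unconditionally, then do the
last-reference check, so an error carried by the final completion is no
longer lost (plain C, not fs/block_dev.c):

  #include <stdio.h>

  struct dio {
          int refs;
          int status;
  };

  static void end_io(struct dio *dio, int bi_status)
  {
          if (bi_status && !dio->status)  /* always propagate first error */
                  dio->status = bi_status;

          if (--dio->refs == 0)           /* last completion ends the IO */
                  printf("whole IO completes with status %d\n", dio->status);
  }

  int main(void)
  {
          struct dio dio = { .refs = 3, .status = 0 };

          end_io(&dio, 0);
          end_io(&dio, 0);
          end_io(&dio, 7);        /* error on the LAST bio now sticks */
          return 0;
  }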