target: increase iblock task sizes
There is no real limit on task sizes in the iblock driver, given that we can chain bios. Increase the maximum size to UINT_MAX, and change the code to submit bios in smaller batches to avoid deadlocks when more bios are in flight than the pool supports. Also increase the pool size so that multiple tasks can always be in flight. I also had to change the task refcounting to hold one reference for the submission path, which is standard practice for this kind of code in Linux (e.g. XFS I/O submission). This was wrong before, but could not easily be hit. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
This commit is contained in:
parent
6e315a066d
commit
d5b4a21b3d
|
@ -46,6 +46,9 @@
|
|||
|
||||
#include "target_core_iblock.h"
|
||||
|
||||
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
|
||||
#define IBLOCK_BIO_POOL_SIZE 128
|
||||
|
||||
static struct se_subsystem_api iblock_template;
|
||||
|
||||
static void iblock_bio_done(struct bio *, int);
|
||||
|
@ -100,10 +103,8 @@ static struct se_device *iblock_create_virtdevice(
|
|||
return ERR_PTR(ret);
|
||||
}
|
||||
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
|
||||
/*
|
||||
* These settings need to be made tunable..
|
||||
*/
|
||||
ib_dev->ibd_bio_set = bioset_create(32, 0);
|
||||
|
||||
ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
|
||||
if (!ib_dev->ibd_bio_set) {
|
||||
pr_err("IBLOCK: Unable to create bioset()\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -129,8 +130,8 @@ static struct se_device *iblock_create_virtdevice(
|
|||
q = bdev_get_queue(bd);
|
||||
limits = &dev_limits.limits;
|
||||
limits->logical_block_size = bdev_logical_block_size(bd);
|
||||
limits->max_hw_sectors = queue_max_hw_sectors(q);
|
||||
limits->max_sectors = queue_max_sectors(q);
|
||||
limits->max_hw_sectors = UINT_MAX;
|
||||
limits->max_sectors = UINT_MAX;
|
||||
dev_limits.hw_queue_depth = q->nr_requests;
|
||||
dev_limits.queue_depth = q->nr_requests;
|
||||
|
||||
|
@ -204,7 +205,7 @@ iblock_alloc_task(unsigned char *cdb)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
atomic_set(&ib_req->ib_bio_cnt, 0);
|
||||
atomic_set(&ib_req->pending, 1);
|
||||
return &ib_req->ib_task;
|
||||
}
|
||||
|
||||
|
@ -484,24 +485,35 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
|
|||
bio->bi_destructor = iblock_bio_destructor;
|
||||
bio->bi_end_io = &iblock_bio_done;
|
||||
bio->bi_sector = lba;
|
||||
atomic_inc(&ib_req->ib_bio_cnt);
|
||||
atomic_inc(&ib_req->pending);
|
||||
|
||||
pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
|
||||
pr_debug("Set ib_req->ib_bio_cnt: %d\n",
|
||||
atomic_read(&ib_req->ib_bio_cnt));
|
||||
pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
|
||||
return bio;
|
||||
}
|
||||
|
||||
static void iblock_submit_bios(struct bio_list *list, int rw)
|
||||
{
|
||||
struct blk_plug plug;
|
||||
struct bio *bio;
|
||||
|
||||
blk_start_plug(&plug);
|
||||
while ((bio = bio_list_pop(list)))
|
||||
submit_bio(rw, bio);
|
||||
blk_finish_plug(&plug);
|
||||
}
|
||||
|
||||
static int iblock_do_task(struct se_task *task)
|
||||
{
|
||||
struct se_cmd *cmd = task->task_se_cmd;
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
struct iblock_req *ibr = IBLOCK_REQ(task);
|
||||
struct bio *bio;
|
||||
struct bio_list list;
|
||||
struct scatterlist *sg;
|
||||
u32 i, sg_num = task->task_sg_nents;
|
||||
sector_t block_lba;
|
||||
struct blk_plug plug;
|
||||
unsigned bio_cnt;
|
||||
int rw;
|
||||
|
||||
if (task->task_data_direction == DMA_TO_DEVICE) {
|
||||
|
@ -546,6 +558,7 @@ static int iblock_do_task(struct se_task *task)
|
|||
|
||||
bio_list_init(&list);
|
||||
bio_list_add(&list, bio);
|
||||
bio_cnt = 1;
|
||||
|
||||
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
|
||||
/*
|
||||
|
@ -555,10 +568,16 @@ static int iblock_do_task(struct se_task *task)
|
|||
*/
|
||||
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
|
||||
!= sg->length) {
|
||||
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
|
||||
iblock_submit_bios(&list, rw);
|
||||
bio_cnt = 0;
|
||||
}
|
||||
|
||||
bio = iblock_get_bio(task, block_lba, sg_num);
|
||||
if (!bio)
|
||||
goto fail;
|
||||
bio_list_add(&list, bio);
|
||||
bio_cnt++;
|
||||
}
|
||||
|
||||
/* Always in 512 byte units for Linux/Block */
|
||||
|
@ -566,11 +585,12 @@ static int iblock_do_task(struct se_task *task)
|
|||
sg_num--;
|
||||
}
|
||||
|
||||
blk_start_plug(&plug);
|
||||
while ((bio = bio_list_pop(&list)))
|
||||
submit_bio(rw, bio);
|
||||
blk_finish_plug(&plug);
|
||||
iblock_submit_bios(&list, rw);
|
||||
|
||||
if (atomic_dec_and_test(&ibr->pending)) {
|
||||
transport_complete_task(task,
|
||||
!atomic_read(&ibr->ib_bio_err_cnt));
|
||||
}
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
|
@ -622,7 +642,7 @@ static void iblock_bio_done(struct bio *bio, int err)
|
|||
|
||||
bio_put(bio);
|
||||
|
||||
if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
|
||||
if (!atomic_dec_and_test(&ibr->pending))
|
||||
return;
|
||||
|
||||
pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
struct iblock_req {
|
||||
struct se_task ib_task;
|
||||
atomic_t ib_bio_cnt;
|
||||
atomic_t pending;
|
||||
atomic_t ib_bio_err_cnt;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
|
|
Loading…
Reference in New Issue