block: make get_request[_wait]() fail if queue is dead
Currently get_request[_wait]() allocates a request whether the queue
is dead or not. This patch makes get_request[_wait]() return NULL if
@q is dead.

blk_queue_bio() is updated to fail the submitted bio if request
allocation fails. While at it, add docbook comments for
get_request[_wait]().

Note that the current code makes a rather unclear assumption (there
are spurious DEAD tests scattered around) that the owner of a queue
guarantees that no request travels through the block layer if the
queue is dead. This patch in itself doesn't change much; however, it
will allow fixing that broken assumption in the next patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit da8303c63b
parent bc16a4f933
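With this change, request allocation can fail because @q is dead even for
__GFP_WAIT callers, so code calling blk_get_request() directly has to treat
a NULL return as a hard failure rather than a transient one. Below is a
minimal sketch of such a caller, written against the blk_get_request()
signature of this era; the function name and error handling are
illustrative, not part of the patch:

	static int example_issue(struct request_queue *q)
	{
		struct request *rq;

		/*
		 * GFP_KERNEL implies __GFP_WAIT, so this may sleep; it can
		 * still return NULL once the queue has been marked dead.
		 */
		rq = blk_get_request(q, WRITE, GFP_KERNEL);
		if (!rq)
			return -ENODEV;	/* out of memory or @q is dead */

		/* ... set up and issue the request here ... */

		blk_put_request(rq);
		return 0;
	}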
@@ -709,10 +709,19 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	return true;
 }
 
-/*
- * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
+/**
+ * get_request - get a free request
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
+ *
+ * Get a free request from @q.  This function may fail under memory
+ * pressure or if @q is dead.
+ *
+ * Must be called with @q->queue_lock held.
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
@@ -723,6 +732,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
+
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
@@ -815,11 +827,18 @@ out:
 	return rq;
 }
 
-/*
- * No available requests for this queue, wait for some requests to become
- * available.
+/**
+ * get_request_wait - get a free request with retry
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
  *
- * Called with q->queue_lock held, and returns with it unlocked.
+ * Get a free request from @q.  This function keeps retrying under memory
+ * pressure and fails iff @q is dead.
+ *
+ * Must be called with @q->queue_lock held.
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
@@ -833,6 +852,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	struct io_context *ioc;
 	struct request_list *rl = &q->rq;
 
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
+
 	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 				  TASK_UNINTERRUPTIBLE);
 
@@ -863,19 +885,15 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	struct request *rq;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-		return NULL;
-
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT) {
+	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
-	} else {
+	else
 		rq = get_request(q, rw, NULL, gfp_mask);
-		if (!rq)
-			spin_unlock_irq(q->queue_lock);
-	}
+	if (!rq)
+		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
 
 	return rq;
@@ -1299,6 +1317,10 @@ get_rq:
 	 * Returns with the queue unlocked.
 	 */
 	req = get_request_wait(q, rw_flags, bio);
+	if (unlikely(!req)) {
+		bio_endio(bio, -ENODEV);	/* @q is dead */
+		goto out_unlock;
+	}
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
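From the submitter's side, the new error path in blk_queue_bio() above means
a bio queued to a dead queue is now completed immediately with -ENODEV
instead of sleeping forever in get_request_wait(). A sketch of how that
failure surfaces in a completion callback, assuming the two-argument
bi_end_io signature in use at this point in the kernel's history; the
callback name is illustrative:

	static void example_end_io(struct bio *bio, int error)
	{
		/* bio_endio(bio, -ENODEV) from blk_queue_bio() lands here */
		if (error == -ENODEV)
			pr_warn("bio failed: request_queue is dead\n");
		bio_put(bio);
	}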