block: make blk_init_free_list and elevator_init idempotent
blk_init_allocated_queue_node may fail, and the caller _could_ retry. Accommodate the unlikely event that blk_init_allocated_queue_node is called on an already (possibly partially) initialized request_queue.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
This commit is contained in:
parent
c86d1b8ae6
commit
1abec4fdbb
|
@@ -467,6 +467,9 @@ static int blk_init_free_list(struct request_queue *q)
|
|||
{
|
||||
struct request_list *rl = &q->rq;
|
||||
|
||||
if (unlikely(rl->rq_pool))
|
||||
return 0;
|
||||
|
||||
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
|
||||
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
|
||||
rl->elvpriv = 0;
|
||||
|
|
|
@@ -242,9 +242,11 @@ int elevator_init(struct request_queue *q, char *name)
|
|||
{
|
||||
struct elevator_type *e = NULL;
|
||||
struct elevator_queue *eq;
|
||||
int ret = 0;
|
||||
void *data;
|
||||
|
||||
if (unlikely(q->elevator))
|
||||
return 0;
|
||||
|
||||
INIT_LIST_HEAD(&q->queue_head);
|
||||
q->last_merge = NULL;
|
||||
q->end_sector = 0;
|
||||
|
@@ -284,7 +286,7 @@ int elevator_init(struct request_queue *q, char *name)
|
|||
}
|
||||
|
||||
elevator_attach(q, eq, data);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(elevator_init);
|
||||
|
||||
|
|
Loading…
Reference in New Issue