[PATCH] blk: light iocontext ops

get_io_context needlessly turned off interrupts and checked for racing io
context creations.  Neither is needed, because the io context can only be
created while in the process context of the current process.

Also, split the function in two.  The light version, current_io_context, does
not elevate the reference count, but can be used from process context because
the process itself already holds a reference.
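
For illustration, the caller-visible difference looks roughly like this (a
sketch only, mirroring the get_request_wait() hunk below; not part of the
patch text itself):

	/* old: take an extra reference, then drop it again */
	ioc = get_io_context(GFP_NOIO);
	ioc_set_batching(q, ioc);
	put_io_context(ioc);

	/* new: borrow current's own reference; no put needed as long as
	 * we stay in this process context
	 */
	ioc = current_io_context(GFP_NOIO);
	ioc_set_batching(q, ioc);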

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit fb3cc4320e
parent d6344532a2
Author: Nick Piggin
Date:   2005-06-28 20:45:15 -07:00
Committed by: Linus Torvalds
2 changed files with 27 additions and 32 deletions

@@ -1876,7 +1876,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	struct io_context *ioc = current_io_context(GFP_ATOMIC);
 
 	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
 		goto out;
@@ -1959,7 +1959,6 @@ rq_starved:
 	rq_init(q, rq);
 	rq->rl = rl;
 out:
-	put_io_context(ioc);
 	return rq;
 }
@@ -1997,9 +1996,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		ioc = get_io_context(GFP_NOIO);
+		ioc = current_io_context(GFP_NOIO);
 		ioc_set_batching(q, ioc);
-		put_io_context(ioc);
 
 		spin_lock_irq(q->queue_lock);
 	}
@@ -3282,24 +3280,20 @@ void exit_io_context(void)
 
 /*
  * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+ * Otherwise, return its existing IO context.
  *
- * This is always called in the context of the task which submitted the I/O.
- * But weird things happen, so we disable local interrupts to ensure exclusive
- * access to *current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
  */
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *current_io_context(int gfp_flags)
 {
 	struct task_struct *tsk = current;
-	unsigned long flags;
 	struct io_context *ret;
 
-	local_irq_save(flags);
 	ret = tsk->io_context;
-	if (ret)
-		goto out;
-
-	local_irq_restore(flags);
+	if (likely(ret))
+		return ret;
 
 	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
 	if (ret) {
@@ -3310,27 +3304,27 @@ struct io_context *get_io_context(int gfp_flags)
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		ret->cic = NULL;
-
-		local_irq_save(flags);
-
-		/*
-		 * very unlikely, someone raced with us in setting up the task
-		 * io context. free new context and just grab a reference.
-		 */
-		if (!tsk->io_context)
-			tsk->io_context = ret;
-		else {
-			kmem_cache_free(iocontext_cachep, ret);
-			ret = tsk->io_context;
-		}
-out:
-		atomic_inc(&ret->refcount);
-		local_irq_restore(flags);
+		tsk->io_context = ret;
 	}
 
 	return ret;
 }
+EXPORT_SYMBOL(current_io_context);
+
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+	struct io_context *ret;
+	ret = current_io_context(gfp_flags);
+	if (likely(ret))
+		atomic_inc(&ret->refcount);
+	return ret;
+}
 EXPORT_SYMBOL(get_io_context);
 
 void copy_io_context(struct io_context **pdst, struct io_context **psrc)

@@ -96,6 +96,7 @@ struct io_context {
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
+struct io_context *current_io_context(int gfp_flags);
 struct io_context *get_io_context(int gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);