[PATCH] aio: revert lock_kiocb()
lock_kiocb() was introduced to serialize retrying and cancellation.  In the
process of doing so it tried to sleep waiting for KIF_LOCKED while holding
the ctx_lock spinlock.  Recent fixes have ensured that multiple concurrent
retries won't be attempted for a given iocb.  Cancel has other problems and
has no significant in-tree users that have been complaining about it.  So
for the immediate future we'll revert sleeping with the lock held and will
address proper cancellation and retry serialization in the future.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
Acked-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
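
The pattern being reverted is sleeping inside a spinlocked, IRQ-disabled
section.  A condensed sketch of the offending call chain (kernel-style C,
abridged from the code removed below, not a standalone build; retry_iocb()
is a hypothetical stand-in for the __aio_run_iocbs() loop):

        /* Sketch only: wait_on_bit_lock() runs its action callback, which
         * calls schedule(), so the task may sleep while ctx_lock is held. */
        static int lock_kiocb_action(void *param)
        {
                schedule();                     /* may sleep */
                return 0;
        }

        static void retry_iocb(struct kioctx *ctx, struct kiocb *iocb)
        {
                spin_lock_irq(&ctx->ctx_lock);  /* atomic context, IRQs off */

                /* lock_kiocb(): sleeps under ctx_lock -- the bug reverted here */
                wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action,
                                 TASK_UNINTERRUPTIBLE);

                aio_run_iocb(iocb);             /* retry the iocb */

                spin_unlock_irq(&ctx->ctx_lock);
        }
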
commit 4faa528528
parent e7507ed91e

 fs/aio.c            | 26
 include/linux/aio.h |  7
 2 files changed, 7 insertions(+), 26 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -398,7 +398,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 	if (unlikely(!req))
 		return NULL;
 
-	req->ki_flags = 1 << KIF_LOCKED;
+	req->ki_flags = 0;
 	req->ki_users = 2;
 	req->ki_key = 0;
 	req->ki_ctx = ctx;
@@ -547,25 +547,6 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	return ioctx;
 }
 
-static int lock_kiocb_action(void *param)
-{
-	schedule();
-	return 0;
-}
-
-static inline void lock_kiocb(struct kiocb *iocb)
-{
-	wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action,
-			 TASK_UNINTERRUPTIBLE);
-}
-
-static inline void unlock_kiocb(struct kiocb *iocb)
-{
-	kiocbClearLocked(iocb);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&iocb->ki_flags, KIF_LOCKED);
-}
-
 /*
  * use_mm
  *	Makes the calling kernel thread take on the specified
@@ -796,9 +777,7 @@ static int __aio_run_iocbs(struct kioctx *ctx)
 		 * Hold an extra reference while retrying i/o.
 		 */
 		iocb->ki_users++;       /* grab extra reference */
-		lock_kiocb(iocb);
 		aio_run_iocb(iocb);
-		unlock_kiocb(iocb);
 		if (__aio_put_req(ctx, iocb))  /* drop extra ref */
 			put_ioctx(ctx);
 	}
@@ -1542,7 +1521,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 
 	spin_lock_irq(&ctx->ctx_lock);
 	aio_run_iocb(req);
-	unlock_kiocb(req);
 	if (!list_empty(&ctx->run_list)) {
 		/* drain the run list */
 		while (__aio_run_iocbs(ctx))
@@ -1674,7 +1652,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
 	if (NULL != cancel) {
 		struct io_event tmp;
 		pr_debug("calling cancel\n");
-		lock_kiocb(kiocb);
 		memset(&tmp, 0, sizeof(tmp));
 		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
 		tmp.data = kiocb->ki_user_data;
@@ -1686,7 +1663,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
 			if (copy_to_user(result, &tmp, sizeof(tmp)))
 				ret = -EFAULT;
 		}
-		unlock_kiocb(kiocb);
 	} else
 		ret = -EINVAL;
 
diff --git a/include/linux/aio.h b/include/linux/aio.h
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -24,7 +24,12 @@ struct kioctx;
 #define KIOCB_SYNC_KEY		(~0U)
 
 /* ki_flags bits */
-#define KIF_LOCKED		0
+/*
+ * This may be used for cancel/retry serialization in the future, but
+ * for now it's unused and we probably don't want modules to even
+ * think they can use it.
+ */
+/* #define KIF_LOCKED		0 */
 #define KIF_KICKED		1
 #define KIF_CANCELLED		2
 