fs/aio.c: make 3 functions static
Make the following needlessly global functions static:

- __put_ioctx()
- lookup_ioctx()
- io_submit_one()

Signed-off-by: Adrian Bunk <bunk@kernel.org>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 07d45da616
commit d5470b596a

 fs/aio.c            | 67 ++++++++++++++++++++++++++++++++++++++++----------------------------
 include/linux/aio.h | 19 -------------------
 2 files changed, 39 insertions(+), 47 deletions(-)
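As background (an illustrative aside, not part of the patch): marking a file-scope function `static` gives it internal linkage, so its symbol is no longer exported to the rest of the kernel image, nothing outside the file can link against it, and the compiler is free to inline or discard it. A minimal sketch with hypothetical names, not taken from aio.c:

/* linkage.c -- minimal sketch; names are illustrative only */

/* External linkage: any translation unit may declare and call this,
 * so the symbol must survive into the global namespace. */
int visible_everywhere(int x)
{
	return x * 2;
}

/* Internal linkage: callable only within this file; the compiler may
 * inline it freely and drop the symbol entirely. */
static int file_local(int x)
{
	return x + 1;
}

int combine(int x)
{
	return visible_everywhere(file_local(x));
}

The patch itself follows, starting with the fs/aio.c half.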
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -191,6 +191,43 @@ static int aio_setup_ring(struct kioctx *ctx)
 	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_events = ctx->max_reqs;
+
+	BUG_ON(ctx->reqs_active);
+
+	cancel_delayed_work(&ctx->wq);
+	cancel_work_sync(&ctx->wq.work);
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
+}
+
+#define get_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	atomic_inc(&(kioctx)->users);					\
+} while (0)
+#define put_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
+		__put_ioctx(kioctx);					\
+} while (0)
+
 /* ioctx_alloc
  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
@@ -361,32 +398,6 @@ void exit_aio(struct mm_struct *mm)
 	}
 }
 
-/* __put_ioctx
- *	Called when the last user of an aio context has gone away,
- *	and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
-	unsigned nr_events = ctx->max_reqs;
-
-	BUG_ON(ctx->reqs_active);
-
-	cancel_delayed_work(&ctx->wq);
-	cancel_work_sync(&ctx->wq.work);
-	aio_free_ring(ctx);
-	mmdrop(ctx->mm);
-	ctx->mm = NULL;
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
-	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
-}
-
 /* aio_get_req
  *	Allocate a slot for an aio request.  Increments the users count
  *	of the kioctx so that the kioctx stays around until all requests are
@@ -545,7 +556,7 @@ int aio_put_req(struct kiocb *req)
 /* Lookup an ioctx id.  ioctx_list is lockless for reads.
  * FIXME: this is O(n) and is only suitable for development.
  */
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct kioctx *ioctx;
 	struct mm_struct *mm;
@@ -1552,7 +1563,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		  struct iocb *iocb)
 {
 	struct kiocb *req;
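An aside on the block moved above: put_ioctx() expands to an atomic decrement-and-test, and __put_ioctx() runs only when the last reference drops, which is why the now-static function has to appear before the macros in fs/aio.c. Below is a userspace sketch of the same get/put refcount idiom, using C11 atomics in place of the kernel's atomic_t; every name here is illustrative, not from the kernel:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
	atomic_int users;
	/* ... payload ... */
};

static void ctx_get(struct ctx *c)
{
	assert(atomic_load(&c->users) > 0);	/* mirrors the BUG_ON() */
	atomic_fetch_add(&c->users, 1);
}

static void ctx_put(struct ctx *c)
{
	assert(atomic_load(&c->users) > 0);
	/* fetch_sub returns the previous value: 1 means last reference */
	if (atomic_fetch_sub(&c->users, 1) == 1)
		free(c);			/* plays the role of __put_ioctx() */
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));
	atomic_init(&c->users, 1);	/* creator holds the first reference */
	ctx_get(c);			/* a second user takes a reference */
	ctx_put(c);			/* ...and drops it */
	ctx_put(c);			/* last put frees the context */
	return 0;
}

The include/linux/aio.h half of the patch follows.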
diff --git a/include/linux/aio.h b/include/linux/aio.h
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -209,27 +209,8 @@ extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
 extern int aio_put_req(struct kiocb *iocb);
 extern void kick_iocb(struct kiocb *iocb);
 extern int aio_complete(struct kiocb *iocb, long res, long res2);
-extern void __put_ioctx(struct kioctx *ctx);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
-extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
-extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb);
-
-/* semi private, but used by the 32bit emulations: */
-struct kioctx *lookup_ioctx(unsigned long ctx_id);
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-		  struct iocb *iocb);
-
-#define get_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	atomic_inc(&(kioctx)->users);					\
-} while (0)
-#define put_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
-		__put_ioctx(kioctx);					\
-} while (0)
-
 #define io_wait_to_kiocb(wait)	container_of(wait, struct kiocb, ki_wait)
 
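One way to spot needlessly global functions like these (not necessarily how this particular patch was found) is to build with GCC's -Wmissing-prototypes, which warns about any externally visible function defined without a prior prototype. A hypothetical example:

/* foo.c -- compile with: gcc -c -Wmissing-prototypes foo.c */

/* Warns: no previous prototype for 'needlessly_global'.  The usual fix
 * is either a declaration in a shared header or, as in this commit,
 * making the function static. */
int needlessly_global(int x)
{
	return x + 1;
}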