kernel: better document the use_mm/unuse_mm API contract

Switch the function documentation to kerneldoc comments, and add
WARN_ON_ONCE asserts that the calling thread is a kernel thread and does
not have ->mm set (or has ->mm set in the case of unuse_mm).

Also give the functions a kthread_ prefix to better document the use case.

[hch@lst.de: fix a comment typo, cover the newly merged use_mm/unuse_mm caller in vfio]
  Link: http://lkml.kernel.org/r/20200416053158.586887-3-hch@lst.de
[sfr@canb.auug.org.au: powerpc/vas: fix up for {un}use_mm() rename]
  Link: http://lkml.kernel.org/r/20200422163935.5aa93ba5@canb.auug.org.au
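
For illustration only (not part of the patch), the intended calling convention of the renamed helpers looks roughly like the sketch below; the worker function name and the way the mm reference reaches it are made up for the example:

    /* Sketch only: everything except kthread_use_mm()/kthread_unuse_mm() is
     * illustrative.  The mm must be kept alive by the caller (e.g. via
     * mmget_not_zero()) for as long as the kthread uses it.
     */
    #include <linux/kthread.h>
    #include <linux/sched/mm.h>

    static int example_worker(void *data)
    {
            struct mm_struct *mm = data;    /* reference held by the creator */

            kthread_use_mm(mm);     /* WARNs unless PF_KTHREAD and !current->mm */
            /* current->mm == mm here, so copy_{to,from}_user() hit that mm */
            kthread_unuse_mm(mm);   /* WARNs unless PF_KTHREAD and current->mm set */
            return 0;
    }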

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> [usb]
Acked-by: Haren Myneni <haren@linux.ibm.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Jason Wang <jasowang@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Link: http://lkml.kernel.org/r/20200404094101.672954-6-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:     Christoph Hellwig
AuthorDate: 2020-06-10 18:42:06 -07:00
Commit:     Linus Torvalds
parent 4dbe59a6ae
commit f5678e7f2a
12 changed files with 40 additions and 41 deletions

diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c

@@ -127,7 +127,7 @@ static void update_csb(struct vas_window *window,
 		return;
 	}
-	use_mm(window->mm);
+	kthread_use_mm(window->mm);
 	rc = copy_to_user(csb_addr, &csb, sizeof(csb));
 	/*
 	 * User space polls on csb.flags (first byte). So add barrier
@@ -139,7 +139,7 @@ static void update_csb(struct vas_window *window,
 		smp_mb();
 		rc = copy_to_user(csb_addr, &csb, sizeof(u8));
 	}
-	unuse_mm(window->mm);
+	kthread_unuse_mm(window->mm);
 	put_task_struct(tsk);
 	/* Success */

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h

@@ -197,9 +197,9 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
 			if ((mmptr) == current->mm) { \
 				valid = !get_user((dst), (wptr)); \
 			} else if (current->mm == NULL) { \
-				use_mm(mmptr); \
+				kthread_use_mm(mmptr); \
 				valid = !get_user((dst), (wptr)); \
-				unuse_mm(mmptr); \
+				kthread_unuse_mm(mmptr); \
 			} \
 			pagefault_enable(); \
 		} \

diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c

@@ -827,9 +827,9 @@ static void ffs_user_copy_worker(struct work_struct *work)
 		mm_segment_t oldfs = get_fs();
 		set_fs(USER_DS);
-		use_mm(io_data->mm);
+		kthread_use_mm(io_data->mm);
 		ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
-		unuse_mm(io_data->mm);
+		kthread_unuse_mm(io_data->mm);
 		set_fs(oldfs);
 	}

diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c

@@ -462,9 +462,9 @@ static void ep_user_copy_worker(struct work_struct *work)
 	struct kiocb *iocb = priv->iocb;
 	size_t ret;
-	use_mm(mm);
+	kthread_use_mm(mm);
 	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
-	unuse_mm(mm);
+	kthread_unuse_mm(mm);
 	if (!ret)
 		ret = -EFAULT;

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c

@@ -2817,7 +2817,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
 		return -EPERM;
 	if (kthread)
-		use_mm(mm);
+		kthread_use_mm(mm);
 	else if (current->mm != mm)
 		goto out;
@@ -2844,7 +2844,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
 		*copied = copy_from_user(data, (void __user *)vaddr,
 					 count) ? 0 : count;
 	if (kthread)
-		unuse_mm(mm);
+		kthread_unuse_mm(mm);
 out:
 	mmput(mm);
 	return *copied ? 0 : -EFAULT;

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c

@@ -332,7 +332,7 @@ static int vhost_worker(void *data)
 	mm_segment_t oldfs = get_fs();
 	set_fs(USER_DS);
-	use_mm(dev->mm);
+	kthread_use_mm(dev->mm);
 	for (;;) {
 		/* mb paired w/ kthread_stop */
@@ -360,7 +360,7 @@ static int vhost_worker(void *data)
 			schedule();
 		}
 	}
-	unuse_mm(dev->mm);
+	kthread_unuse_mm(dev->mm);
 	set_fs(oldfs);
 	return 0;
 }

diff --git a/fs/io-wq.c b/fs/io-wq.c

@@ -170,7 +170,7 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 		}
 		__set_current_state(TASK_RUNNING);
 		set_fs(KERNEL_DS);
-		unuse_mm(worker->mm);
+		kthread_unuse_mm(worker->mm);
 		mmput(worker->mm);
 		worker->mm = NULL;
 	}
@@ -417,7 +417,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
 {
 	if (worker->mm) {
-		unuse_mm(worker->mm);
+		kthread_unuse_mm(worker->mm);
 		mmput(worker->mm);
 		worker->mm = NULL;
 	}
@@ -426,7 +426,7 @@ static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
 		return;
 	}
 	if (mmget_not_zero(work->mm)) {
-		use_mm(work->mm);
+		kthread_use_mm(work->mm);
 		if (!worker->mm)
 			set_fs(USER_DS);
 		worker->mm = work->mm;

diff --git a/fs/io_uring.c b/fs/io_uring.c

@@ -5866,7 +5866,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	if (io_op_defs[req->opcode].needs_mm && !current->mm) {
 		if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
 			return -EFAULT;
-		use_mm(ctx->sqo_mm);
+		kthread_use_mm(ctx->sqo_mm);
 	}
 	sqe_flags = READ_ONCE(sqe->flags);
@@ -5980,7 +5980,7 @@ static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
 	struct mm_struct *mm = current->mm;
 	if (mm) {
-		unuse_mm(mm);
+		kthread_unuse_mm(mm);
 		mmput(mm);
 	}
 }

diff --git a/include/linux/kthread.h b/include/linux/kthread.h

@@ -200,8 +200,8 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
 void kthread_destroy_worker(struct kthread_worker *worker);
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
 struct cgroup_subsys_state;

diff --git a/kernel/kthread.c b/kernel/kthread.c

@@ -1208,18 +1208,18 @@ void kthread_destroy_worker(struct kthread_worker *worker)
 }
 EXPORT_SYMBOL(kthread_destroy_worker);
-/*
- * use_mm
- *	Makes the calling kernel thread take on the specified
- *	mm context.
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
+/**
+ * kthread_use_mm - make the calling kthread operate on an address space
+ * @mm: address space to operate on
  */
-void use_mm(struct mm_struct *mm)
+void kthread_use_mm(struct mm_struct *mm)
 {
 	struct mm_struct *active_mm;
 	struct task_struct *tsk = current;
+	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+	WARN_ON_ONCE(tsk->mm);
 	task_lock(tsk);
 	active_mm = tsk->active_mm;
 	if (active_mm != mm) {
@@ -1236,20 +1236,19 @@ void use_mm(struct mm_struct *mm)
 	if (active_mm != mm)
 		mmdrop(active_mm);
 }
-EXPORT_SYMBOL_GPL(use_mm);
+EXPORT_SYMBOL_GPL(kthread_use_mm);
-/*
- * unuse_mm
- *	Reverses the effect of use_mm, i.e. releases the
- *	specified mm context which was earlier taken on
- *	by the calling kernel thread
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
+/**
+ * kthread_unuse_mm - reverse the effect of kthread_use_mm()
+ * @mm: address space to operate on
  */
-void unuse_mm(struct mm_struct *mm)
+void kthread_unuse_mm(struct mm_struct *mm)
 {
 	struct task_struct *tsk = current;
+	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+	WARN_ON_ONCE(!tsk->mm);
 	task_lock(tsk);
 	sync_mm_rss(mm);
 	tsk->mm = NULL;
@@ -1257,7 +1256,7 @@ void unuse_mm(struct mm_struct *mm)
 	enter_lazy_tlb(mm, tsk);
 	task_unlock(tsk);
 }
-EXPORT_SYMBOL_GPL(unuse_mm);
+EXPORT_SYMBOL_GPL(kthread_unuse_mm);
 #ifdef CONFIG_BLK_CGROUP
 /**

diff --git a/mm/oom_kill.c b/mm/oom_kill.c

@@ -126,7 +126,7 @@ static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
 /*
  * The process p may have detached its own ->mm while exiting or through
- * use_mm(), but one or more of its subthreads may still have a valid
+ * kthread_use_mm(), but one or more of its subthreads may still have a valid
  * pointer. Return p, or any of its subthreads with a valid ->mm, with
  * task_lock() held.
  */
@@ -919,8 +919,8 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
 			continue;
 		}
 		/*
-		 * No use_mm() user needs to read from the userspace so we are
-		 * ok to reap it.
+		 * No kthread_use_mm() user needs to read from the userspace so
+		 * we are ok to reap it.
 		 */
 		if (unlikely(p->flags & PF_KTHREAD))
 			continue;

diff --git a/mm/vmacache.c b/mm/vmacache.c

@@ -24,8 +24,8 @@
  * task's vmacache pertains to a different mm (ie, its own). There is
  * nothing we can do here.
  *
- * Also handle the case where a kernel thread has adopted this mm via use_mm().
- * That kernel thread's vmacache is not applicable to this mm.
+ * Also handle the case where a kernel thread has adopted this mm via
+ * kthread_use_mm(). That kernel thread's vmacache is not applicable to this mm.
  */
 static inline bool vmacache_valid_mm(struct mm_struct *mm)
 {