mm: move use_mm/unuse_mm from aio.c to mm/
Anyone who wants to do copy to/from user from a kernel thread needs
use_mm (like what fs/aio has). Move that into mm/ to make reusing and
exporting easier down the line, and make aio use it. The next intended
user, besides aio, will be vhost-net.

Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
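For illustration only (not something this commit adds): the point of
sharing these helpers is that any in-kernel worker thread holding a
reference to a user process's mm can bracket its uaccess calls with
use_mm()/unuse_mm(). A minimal sketch, with the hypothetical helper
worker_copy() standing in for a real caller such as vhost-net:

    #include <linux/errno.h>
    #include <linux/mmu_context.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Temporarily adopt the issuer's address space so that
     * copy_from_user() resolves uaddr against it, then return the
     * kernel thread to its lazy-TLB state.
     */
    static int worker_copy(struct mm_struct *mm, void __user *uaddr,
                           void *buf, size_t len)
    {
            int ret = 0;

            use_mm(mm);
            if (copy_from_user(buf, uaddr, len))
                    ret = -EFAULT;
            unuse_mm(mm);
            return ret;
    }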
commit 3d2d827f5c
parent 425fbf047c

 fs/aio.c                    | 47 +----------------------------------
 include/linux/mmu_context.h |  9 +++++++
 mm/Makefile                 |  2 +-
 mm/mmu_context.c            | 55 +++++++++++++++++++++++++++++++++++
 4 files changed, 66 insertions(+), 47 deletions(-)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -24,6 +24,7 @@
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/mmu_context.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <linux/aio.h>
@@ -34,7 +35,6 @@
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
-#include <asm/mmu_context.h>
 
 #if DEBUG > 1
 #define dprintk		printk
@@ -594,51 +594,6 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	return ret;
 }
 
-/*
- * use_mm
- *	Makes the calling kernel thread take on the specified
- *	mm context.
- *	Called by the retry thread execute retries within the
- *	iocb issuer's mm context, so that copy_from/to_user
- *	operations work seamlessly for aio.
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
-	struct mm_struct *active_mm;
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-	active_mm = tsk->active_mm;
-	atomic_inc(&mm->mm_count);
-	tsk->mm = mm;
-	tsk->active_mm = mm;
-	switch_mm(active_mm, mm, tsk);
-	task_unlock(tsk);
-
-	mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- *	Reverses the effect of use_mm, i.e. releases the
- *	specified mm context which was earlier taken on
- *	by the calling kernel thread
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
- */
-static void unuse_mm(struct mm_struct *mm)
-{
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-	tsk->mm = NULL;
-	/* active_mm is still 'mm' */
-	enter_lazy_tlb(mm, tsk);
-	task_unlock(tsk);
-}
-
 /*
  * Queue up a kiocb to be retried. Assumes that the kiocb
  * has already been marked as kicked, and places it on
--- /dev/null
+++ b/include/linux/mmu_context.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_MMU_CONTEXT_H
+#define _LINUX_MMU_CONTEXT_H
+
+struct mm_struct;
+
+void use_mm(struct mm_struct *mm);
+void unuse_mm(struct mm_struct *mm);
+
+#endif
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
-			   page_isolation.o mm_init.o $(mmu-y)
+			   page_isolation.o mm_init.o mmu_context.o $(mmu-y)
 obj-y += init-mm.o
 
 obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
--- /dev/null
+++ b/mm/mmu_context.c
@@ -0,0 +1,55 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ *
+ * See ../COPYING for licensing terms.
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * use_mm
+ *	Makes the calling kernel thread take on the specified
+ *	mm context.
+ *	Called by the retry thread execute retries within the
+ *	iocb issuer's mm context, so that copy_from/to_user
+ *	operations work seamlessly for aio.
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void use_mm(struct mm_struct *mm)
+{
+	struct mm_struct *active_mm;
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	active_mm = tsk->active_mm;
+	atomic_inc(&mm->mm_count);
+	tsk->mm = mm;
+	tsk->active_mm = mm;
+	switch_mm(active_mm, mm, tsk);
+	task_unlock(tsk);
+
+	mmdrop(active_mm);
+}
+
+/*
+ * unuse_mm
+ *	Reverses the effect of use_mm, i.e. releases the
+ *	specified mm context which was earlier taken on
+ *	by the calling kernel thread
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void unuse_mm(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	tsk->mm = NULL;
+	/* active_mm is still 'mm' */
+	enter_lazy_tlb(mm, tsk);
+	task_unlock(tsk);
+}
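One assumption worth spelling out (not stated by this commit): use_mm()
pins only the mm_struct itself via mm_count; it does not pin the
address space (mm_users), so a caller that expects the user mappings to
survive must hold its own mm_users reference across the borrow. A
hedged sketch of that contract, using the existing get_task_mm()/
mmput() helpers:

    struct mm_struct *mm = get_task_mm(task);  /* pins mm_users */

    if (mm) {
            use_mm(mm);     /* takes its own mm_count reference */
            /* ... copy_{from,to}_user() on behalf of 'task' ... */
            unuse_mm(mm);
            mmput(mm);      /* drop the mm_users pin */
    }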