fix mremap() vs. ioctx_kill() race
Teach the ->mremap() method to return an error and have it fail for aio
mappings that are in the process of being killed.

Note that in case of ->mremap() failure we need to undo the
move_page_tables() we had already done; we could call ->mremap() first,
but then a failure of move_page_tables() would require undoing whatever
a _successful_ ->mremap() has done, which would be a lot more of a
headache in general.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit b2edffdd91
parent 8f778bbc54
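For illustration only, and not part of this commit: under the new calling convention an ->mremap() instance returns 0 to accept the move or a negative errno to veto it, in which case move_vma() moves the page tables back and the mremap() syscall fails with that error. A minimal sketch of such an instance follows; example_ctx and its fields are hypothetical and exist only for this sketch (the real in-tree user is aio_ring_remap() in the fs/aio.c diff below).

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

struct example_ctx {			/* hypothetical, for this sketch only */
	spinlock_t	lock;
	bool		dying;		/* set once teardown has started */
	unsigned long	ring_base;	/* userspace address being tracked */
};

static int example_mremap(struct file *file, struct vm_area_struct *vma)
{
	struct example_ctx *ctx = file->private_data;
	int res = -EINVAL;			/* veto the move by default */

	spin_lock(&ctx->lock);
	if (!ctx->dying) {
		ctx->ring_base = vma->vm_start;	/* record the new location */
		res = 0;			/* accept the move */
	}
	spin_unlock(&ctx->lock);
	return res;	/* < 0: move_vma() undoes move_page_tables() and fails */
}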
fs/aio.c | 19 ++++++++++++-------

@@ -278,11 +278,11 @@ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct kioctx_table *table;
-	int i;
+	int i, res = -EINVAL;
 
 	spin_lock(&mm->ioctx_lock);
 	rcu_read_lock();
@@ -292,13 +292,17 @@ static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
 
 		ctx = table->table[i];
 		if (ctx && ctx->aio_ring_file == file) {
-			ctx->user_id = ctx->mmap_base = vma->vm_start;
+			if (!atomic_read(&ctx->dead)) {
+				ctx->user_id = ctx->mmap_base = vma->vm_start;
+				res = 0;
+			}
 			break;
 		}
 	}
 
 	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
+	return res;
 }
 
 static const struct file_operations aio_ring_fops = {
@@ -748,11 +752,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 {
 	struct kioctx_table *table;
 
-	if (atomic_xchg(&ctx->dead, 1))
-		return -EINVAL;
-
-
 	spin_lock(&mm->ioctx_lock);
+	if (atomic_xchg(&ctx->dead, 1)) {
+		spin_unlock(&mm->ioctx_lock);
+		return -EINVAL;
+	}
+
 	table = rcu_dereference_raw(mm->ioctx_table);
 	WARN_ON(ctx != table->table[ctx->id]);
 	table->table[ctx->id] = NULL;
include/linux/fs.h | 2 +-

@@ -1549,7 +1549,7 @@ struct file_operations {
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 	int (*mmap) (struct file *, struct vm_area_struct *);
-	void (*mremap)(struct file *, struct vm_area_struct *);
+	int (*mremap)(struct file *, struct vm_area_struct *);
 	int (*open) (struct inode *, struct file *);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
mm/mremap.c | 10 ++++++++--
@@ -286,8 +286,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		old_len = new_len;
 		old_addr = new_addr;
 		new_addr = -ENOMEM;
-	} else if (vma->vm_file && vma->vm_file->f_op->mremap)
-		vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
+		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+		if (err < 0) {
+			move_page_tables(new_vma, new_addr, vma, old_addr,
+					 moved_len, true);
+			return err;
+		}
+	}
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {
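A userspace-visible consequence, sketched below for illustration and not part of the commit: an mremap() of the aio ring that lands in the window where the context is being torn down by io_destroy() now fails with EINVAL instead of racing with the teardown. The sketch assumes the long-standing implementation detail that the aio_context_t returned by io_setup() is the userspace address of the ring mapping; whether the EINVAL is actually observed depends on winning the race with the io_destroy() thread.

/*
 * Illustrative sketch (not from the commit): race an mremap() of the
 * aio ring against io_destroy() of the same context.
 * Build with: gcc -pthread sketch.c
 */
#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>		/* aio_context_t */

static aio_context_t ctx;

static void *destroyer(void *arg)
{
	(void)arg;
	syscall(SYS_io_destroy, ctx);	/* kill_ioctx() runs on this path */
	return NULL;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	pthread_t t;
	void *dst, *res;

	if (syscall(SYS_io_setup, 32, &ctx) != 0)
		return 1;

	/* A valid, non-overlapping destination for MREMAP_FIXED. */
	dst = mmap(NULL, page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED)
		return 1;

	pthread_create(&t, NULL, destroyer, NULL);

	/*
	 * Move one page of the ring mapping; mremap() may move part of a
	 * mapping, so this works whatever ring size the kernel picked.
	 * With the fix, a context already marked dead makes this fail.
	 */
	res = mremap((void *)ctx, page, page,
		     MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (res == MAP_FAILED)
		printf("mremap: errno %d (EINVAL if the ctx was dying)\n",
		       errno);

	pthread_join(t, NULL);
	return 0;
}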