fix mremap() vs. ioctx_kill() race

teach the ->mremap() method to return an error and have it fail for
aio mappings that are in the process of being killed

Note that in case of ->mremap() failure we need to undo the move_page_tables()
we'd already done; we could call ->mremap() first, but then a failure of
move_page_tables() would require undoing whatever a _successful_ ->mremap()
had done, which would be a lot more headache in general.
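In code, that ordering works out to roughly the following (a condensed sketch of
the mm/mremap.c hunk below, with the surrounding move_vma() logic elided):

	/* page tables have already been moved into new_vma at this point */
	err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
	if (err < 0) {
		/* undo the move we'd already done: shift the page tables back */
		move_page_tables(new_vma, new_addr, vma, old_addr,
				 moved_len, true);
		return err;
	}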

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro
Date: 2015-04-06 17:48:54 -04:00
parent 8f778bbc54
commit b2edffdd91
3 changed files with 21 additions and 10 deletions

fs/aio.c

@@ -278,11 +278,11 @@ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct kioctx_table *table;
-	int i;
+	int i, res = -EINVAL;
 
 	spin_lock(&mm->ioctx_lock);
 	rcu_read_lock();
@@ -292,13 +292,17 @@ static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
 
 		ctx = table->table[i];
 		if (ctx && ctx->aio_ring_file == file) {
-			ctx->user_id = ctx->mmap_base = vma->vm_start;
+			if (!atomic_read(&ctx->dead)) {
+				ctx->user_id = ctx->mmap_base = vma->vm_start;
+				res = 0;
+			}
 			break;
 		}
 	}
 
 	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
+	return res;
 }
 
 static const struct file_operations aio_ring_fops = {
@@ -748,11 +752,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 {
 	struct kioctx_table *table;
 
-	if (atomic_xchg(&ctx->dead, 1))
-		return -EINVAL;
-
 	spin_lock(&mm->ioctx_lock);
+	if (atomic_xchg(&ctx->dead, 1)) {
+		spin_unlock(&mm->ioctx_lock);
+		return -EINVAL;
+	}
+
 	table = rcu_dereference_raw(mm->ioctx_table);
 	WARN_ON(ctx != table->table[ctx->id]);
 	table->table[ctx->id] = NULL;
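Since both sides now manipulate ctx->dead under mm->ioctx_lock, the race can
only resolve one of two ways; a rough interleaving sketch (illustration only,
not code from this commit):

/*
 * kill_ioctx() wins:                     aio_ring_remap() loses:
 *
 *   spin_lock(&mm->ioctx_lock);
 *   atomic_xchg(&ctx->dead, 1);
 *   table->table[ctx->id] = NULL;
 *   spin_unlock(&mm->ioctx_lock);
 *                                        spin_lock(&mm->ioctx_lock);
 *                                        atomic_read(&ctx->dead) == 1, so
 *                                        res stays -EINVAL and the
 *                                        mremap() call fails
 *
 * Or aio_ring_remap() wins, updating ctx->mmap_base and ctx->user_id before
 * kill_ioctx() gets the lock, and the ring is torn down at its new location.
 */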

include/linux/fs.h

@@ -1549,7 +1549,7 @@ struct file_operations {
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 	int (*mmap) (struct file *, struct vm_area_struct *);
-	void (*mremap)(struct file *, struct vm_area_struct *);
+	int (*mremap)(struct file *, struct vm_area_struct *);
 	int (*open) (struct inode *, struct file *);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
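Any other ->mremap() implementation needs the same conversion to the new
prototype; a minimal hypothetical example of the new contract (the driver,
field and lock names below are illustrative, not from this commit):

static int example_mremap(struct file *file, struct vm_area_struct *vma)
{
	struct example_ctx *ctx = file->private_data;	/* hypothetical */
	int res = -EINVAL;

	spin_lock(&ctx->lock);
	if (!ctx->dying) {
		/* record where the mapping ended up after the move */
		ctx->mmap_base = vma->vm_start;
		res = 0;
	}
	spin_unlock(&ctx->lock);
	/* a negative return makes move_vma() undo the move and fail mremap() */
	return res;
}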

mm/mremap.c

@@ -286,8 +286,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		old_len = new_len;
 		old_addr = new_addr;
 		new_addr = -ENOMEM;
-	} else if (vma->vm_file && vma->vm_file->f_op->mremap)
-		vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
+		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+		if (err < 0) {
+			move_page_tables(new_vma, new_addr, vma, old_addr,
+					 moved_len, true);
+			return err;
+		}
+	}
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {