xen/gntdev: fix unsafe vma access
In gntdev_ioctl_get_offset_for_vaddr, we need to hold mmap_sem while calling find_vma() to avoid potentially having the result freed out from under us. Similarly, the MMU notifier functions need to synchronize with gntdev_vma_close to avoid map->vma being freed during their iteration.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit 2512f298cb
parent 99beae6cb8
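The first half of the fix follows the usual mm locking rule: a vma returned by find_vma() only stays valid while the caller holds the mm's mmap_sem. A minimal sketch of that pattern for this kernel era (addr is a placeholder address, not part of the patch):

	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);	/* pin the mm's vma tree for reading */
	vma = find_vma(current->mm, addr);	/* result is valid only under mmap_sem */
	if (vma && vma->vm_start <= addr) {
		/* vma covers addr and cannot be unmapped or freed here */
	}
	up_read(&current->mm->mmap_sem);	/* vma must not be dereferenced after this */

gntdev_ioctl_get_offset_for_vaddr previously called find_vma() with no such protection; the second hunk below wraps the lookup and the use of vma->vm_private_data in down_read()/up_read(), and only calls copy_to_user() after the semaphore has been dropped.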
@@ -378,7 +378,20 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 	struct grant_map *map = vma->vm_private_data;
 
 	pr_debug("gntdev_vma_close %p\n", vma);
-	map->vma = NULL;
+	if (use_ptemod) {
+		struct file *file = vma->vm_file;
+		struct gntdev_priv *priv = file->private_data;
+		/* It is possible that an mmu notifier could be running
+		 * concurrently, so take priv->lock to ensure that the vma won't
+		 * vanish during the unmap_grant_pages call, since we will
+		 * spin here until that completes. Such a concurrent call will
+		 * not do any unmapping, since that has been done prior to
+		 * closing the vma, but it may still iterate the unmap_ops list.
+		 */
+		spin_lock(&priv->lock);
+		map->vma = NULL;
+		spin_unlock(&priv->lock);
+	}
 	vma->vm_private_data = NULL;
 	gntdev_put_map(map);
 }
@@ -579,25 +592,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 	struct ioctl_gntdev_get_offset_for_vaddr op;
 	struct vm_area_struct *vma;
 	struct grant_map *map;
+	int rv = -EINVAL;
 
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 
+	down_read(&current->mm->mmap_sem);
 	vma = find_vma(current->mm, op.vaddr);
 	if (!vma || vma->vm_ops != &gntdev_vmops)
-		return -EINVAL;
+		goto out_unlock;
 
 	map = vma->vm_private_data;
 	if (!map)
-		return -EINVAL;
+		goto out_unlock;
 
 	op.offset = map->index << PAGE_SHIFT;
 	op.count = map->count;
+	rv = 0;
 
-	if (copy_to_user(u, &op, sizeof(op)) != 0)
+ out_unlock:
+	up_read(&current->mm->mmap_sem);
+
+	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 		return -EFAULT;
-	return 0;
+	return rv;
 }
 
 static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
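The commit message also notes that the MMU notifier callbacks must synchronize with gntdev_vma_close; that is what the new spin_lock(&priv->lock) around map->vma = NULL in the first hunk provides. For illustration only, a simplified, hypothetical sketch of the notifier side of that race (this is not the actual gntdev code; the function name, the overlap test, and the elided unmap step are placeholders):

static void example_invl_range_start(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;

	spin_lock(&priv->lock);		/* same lock gntdev_vma_close now takes */
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;	/* vma already closed; nothing left to unmap */
		if (map->vma->vm_start >= end || map->vma->vm_end <= start)
			continue;	/* invalidated range does not touch this map */
		/* ... walk map->unmap_ops for the overlapping pages ... */
	}
	spin_unlock(&priv->lock);
}

Because gntdev_vma_close now clears map->vma under the same priv->lock, a concurrently running notifier either sees the vma while it is still valid or sees NULL, never a pointer to a vma that has already been torn down.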