From b3d7fe86fbd06638c71dd851ba921adf50d912ce Mon Sep 17 00:00:00 2001
From: Aleksandr Nogikh
Date: Wed, 23 Mar 2022 16:07:15 -0700
Subject: [PATCH] kcov: properly handle subsequent mmap calls

Allocate the kcov buffer during KCOV_MODE_INIT in order to untie mmapping
of a kcov instance and the actual coverage collection process.  Modify
kcov_mmap, so that it can be reliably used any number of times once
KCOV_MODE_INIT has succeeded.

These changes to the user-facing interface of the tool only weaken the
preconditions, so all existing user space code should remain compatible
with the new version.

Link: https://lkml.kernel.org/r/20220117153634.150357-3-nogikh@google.com
Signed-off-by: Aleksandr Nogikh
Reviewed-by: Dmitry Vyukov
Reviewed-by: Andrey Konovalov
Cc: Alexander Potapenko
Cc: Marco Elver
Cc: Sebastian Andrzej Siewior
Cc: Taras Madan
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/kcov.c | 34 +++++++++++++++-------------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/kernel/kcov.c b/kernel/kcov.c
index e1be7301500b..475524bd900a 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -459,37 +459,28 @@ void kcov_task_exit(struct task_struct *t)
 static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 {
 	int res = 0;
-	void *area;
 	struct kcov *kcov = vma->vm_file->private_data;
 	unsigned long size, off;
 	struct page *page;
 	unsigned long flags;
 
-	area = vmalloc_user(vma->vm_end - vma->vm_start);
-	if (!area)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&kcov->lock, flags);
 	size = kcov->size * sizeof(unsigned long);
-	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
+	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
 	    vma->vm_end - vma->vm_start != size) {
 		res = -EINVAL;
 		goto exit;
 	}
-	if (!kcov->area) {
-		kcov->area = area;
-		vma->vm_flags |= VM_DONTEXPAND;
-		spin_unlock_irqrestore(&kcov->lock, flags);
-		for (off = 0; off < size; off += PAGE_SIZE) {
-			page = vmalloc_to_page(kcov->area + off);
-			if (vm_insert_page(vma, vma->vm_start + off, page))
-				WARN_ONCE(1, "vm_insert_page() failed");
-		}
-		return 0;
+	spin_unlock_irqrestore(&kcov->lock, flags);
+	vma->vm_flags |= VM_DONTEXPAND;
+	for (off = 0; off < size; off += PAGE_SIZE) {
+		page = vmalloc_to_page(kcov->area + off);
+		if (vm_insert_page(vma, vma->vm_start + off, page))
+			WARN_ONCE(1, "vm_insert_page() failed");
 	}
+	return 0;
 exit:
 	spin_unlock_irqrestore(&kcov->lock, flags);
-	vfree(area);
 	return res;
 }
 
@@ -674,6 +665,7 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	unsigned int remote_num_handles;
 	unsigned long remote_arg_size;
 	unsigned long size, flags;
+	void *area;
 
 	kcov = filep->private_data;
 	switch (cmd) {
@@ -683,17 +675,21 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 		 * Must happen before anything else.
 		 *
 		 * First check the size argument - it must be at least 2
-		 * to hold the current position and one PC. Later we allocate
-		 * size * sizeof(unsigned long) memory, that must not overflow.
+		 * to hold the current position and one PC.
 		 */
 		size = arg;
 		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
 			return -EINVAL;
+		area = vmalloc_user(size * sizeof(unsigned long));
+		if (area == NULL)
+			return -ENOMEM;
 		spin_lock_irqsave(&kcov->lock, flags);
 		if (kcov->mode != KCOV_MODE_DISABLED) {
 			spin_unlock_irqrestore(&kcov->lock, flags);
+			vfree(area);
 			return -EBUSY;
 		}
+		kcov->area = area;
 		kcov->size = size;
 		kcov->mode = KCOV_MODE_INIT;
 		spin_unlock_irqrestore(&kcov->lock, flags);
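
A minimal user-space sketch (not part of the patch, added for illustration) of the behavior
this change enables. It loosely follows the example in Documentation/dev-tools/kcov.rst; the
ioctl definitions and COVER_SIZE come from there, error handling is abbreviated, and the
variable names are only placeholders. Because the buffer is now allocated when KCOV_INIT_TRACE
succeeds, mmap() on the kcov fd can be repeated; the second mapping below is the case that was
previously left without pages.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_TRACE_PC	0
#define COVER_SIZE	(64 << 10)

int main(void)
{
	unsigned long *cover1, *cover2;
	size_t len = COVER_SIZE * sizeof(unsigned long);
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	if (fd == -1)
		perror("open"), exit(1);
	/* With this patch the coverage buffer is allocated here (KCOV_MODE_INIT). */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		perror("ioctl init"), exit(1);
	/* First mapping of the shared buffer. */
	cover1 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover1 == MAP_FAILED)
		perror("mmap 1"), exit(1);
	/* A second mapping is populated as well; before the patch only the
	 * first mmap() inserted pages into its VMA. */
	cover2 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover2 == MAP_FAILED)
		perror("mmap 2"), exit(1);
	/* Enable collection on this thread; executed PCs become visible
	 * through either mapping, since both map the same kcov->area pages. */
	if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
		perror("ioctl enable"), exit(1);
	/* ... run a workload, then read the PC count from cover1[0] ... */
	return 0;
}

Note that because kcov_mmap() now keys off kcov->area rather than the mode, remapping also
remains possible after KCOV_ENABLE has switched the instance out of KCOV_MODE_INIT.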