counter: Fix use-after-free race condition for events_queue_size write

A race condition is possible when writing to events_queue_size: the
events kfifo may be freed while a kfifo_in() is still executing on it,
resulting in a use-after-free. This patch prevents such a scenario by
protecting the events queue in operations with a spinlock and acquiring
that lock before performing the events queue size adjustment.

The existing events_lock mutex is renamed to events_out_lock to reflect
that it protects only events queue out operations. Because events queue
in operations can occur in interrupt context, a new events_in_lock
spinlock is introduced and used for them.
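
For reference, below is a minimal sketch of the resulting locking scheme,
written against a stand-alone structure with hypothetical names (struct
event_queue, eq_init(), eq_push(), eq_read(), eq_resize()) rather than the
counter core's own symbols: the queue-in side takes only a spinlock so it
can run from interrupt context, the queue-out side serializes on a mutex
because kfifo_to_user() may sleep, and the resize path takes both locks
around the free-and-swap.

#include <linux/errno.h>
#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct event_queue {
	DECLARE_KFIFO_PTR(fifo, int);
	spinlock_t in_lock;	/* guards kfifo_in(); producer may run in IRQ context */
	struct mutex out_lock;	/* guards kfifo_to_user() and resizing; process context */
};

static int eq_init(struct event_queue *eq, unsigned int size)
{
	spin_lock_init(&eq->in_lock);
	mutex_init(&eq->out_lock);
	return kfifo_alloc(&eq->fifo, size, GFP_KERNEL);
}

/* Producer path (mirrors counter_push_event()): assumed to be the only
 * queue-in user, typically called from an interrupt handler. */
static void eq_push(struct event_queue *eq, int val)
{
	kfifo_in_spinlocked_noirqsave(&eq->fifo, &val, 1, &eq->in_lock);
}

/* Consumer path (mirrors counter_chrdev_read()): process context only,
 * serialized against resizing by out_lock. */
static ssize_t eq_read(struct event_queue *eq, char __user *buf, size_t len)
{
	unsigned int copied;
	int err;

	if (mutex_lock_interruptible(&eq->out_lock))
		return -ERESTARTSYS;
	err = kfifo_to_user(&eq->fifo, buf, len, &copied);
	mutex_unlock(&eq->out_lock);

	return err ? err : copied;
}

/* Resize path (mirrors counter_events_queue_size_write()): hold both locks
 * so neither producer nor consumer can touch the old buffer while it is
 * freed and the new one is swapped in. */
static int eq_resize(struct event_queue *eq, unsigned int size)
{
	DECLARE_KFIFO_PTR(new_fifo, int);
	unsigned long flags;
	int err;

	err = kfifo_alloc(&new_fifo, size, GFP_KERNEL);
	if (err)
		return err;

	mutex_lock(&eq->out_lock);
	spin_lock_irqsave(&eq->in_lock, flags);
	kfifo_free(&eq->fifo);
	eq->fifo.kfifo = new_fifo.kfifo;
	spin_unlock_irqrestore(&eq->in_lock, flags);
	mutex_unlock(&eq->out_lock);

	return 0;
}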

Fixes: feff17a550 ("counter: Implement events_queue_size sysfs attribute")
Cc: David Lechner <david@lechnology.com>
Signed-off-by: William Breathitt Gray <vilhelm.gray@gmail.com>
Link: https://lore.kernel.org/r/20211021103540.955639-1-vilhelm.gray@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 8ac33b8b68 (parent 310e75c72f)
Author:    William Breathitt Gray <vilhelm.gray@gmail.com>
Date:      2021-10-21 19:35:40 +09:00
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

 3 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c

@@ -81,10 +81,10 @@ static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
 			return -ENODEV;
 		}
 
-		if (mutex_lock_interruptible(&counter->events_lock))
+		if (mutex_lock_interruptible(&counter->events_out_lock))
 			return -ERESTARTSYS;
 		err = kfifo_to_user(&counter->events, buf, len, &copied);
-		mutex_unlock(&counter->events_lock);
+		mutex_unlock(&counter->events_out_lock);
 		if (err < 0)
 			return err;
 	} while (!copied);
@@ -436,7 +436,8 @@ int counter_chrdev_add(struct counter_device *const counter)
 	spin_lock_init(&counter->events_list_lock);
 	mutex_init(&counter->n_events_list_lock);
 	init_waitqueue_head(&counter->events_wait);
-	mutex_init(&counter->events_lock);
+	spin_lock_init(&counter->events_in_lock);
+	mutex_init(&counter->events_out_lock);
 
 	/* Initialize character device */
 	cdev_init(&counter->chrdev, &counter_fops);
@@ -559,7 +560,8 @@ void counter_push_event(struct counter_device *const counter, const u8 event,
 		ev.watch.component = comp_node->component;
 		ev.status = -counter_get_data(counter, comp_node, &ev.value);
 
-		copied += kfifo_in(&counter->events, &ev, 1);
+		copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
+							1, &counter->events_in_lock);
 	}
 
 exit_early:

diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
--- a/drivers/counter/counter-sysfs.c
+++ b/drivers/counter/counter-sysfs.c

@@ -11,6 +11,8 @@
 #include <linux/kfifo.h>
 #include <linux/kstrtox.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
@@ -796,6 +798,7 @@ static int counter_events_queue_size_write(struct counter_device *counter,
 {
 	DECLARE_KFIFO_PTR(events, struct counter_event);
 	int err;
+	unsigned long flags;
 
 	/* Allocate new events queue */
 	err = kfifo_alloc(&events, val, GFP_KERNEL);
@@ -803,8 +806,12 @@ static int counter_events_queue_size_write(struct counter_device *counter,
 		return err;
 
 	/* Swap in new events queue */
+	mutex_lock(&counter->events_out_lock);
+	spin_lock_irqsave(&counter->events_in_lock, flags);
 	kfifo_free(&counter->events);
 	counter->events.kfifo = events.kfifo;
+	spin_unlock_irqrestore(&counter->events_in_lock, flags);
+	mutex_unlock(&counter->events_out_lock);
 
 	return 0;
 }

diff --git a/include/linux/counter.h b/include/linux/counter.h
--- a/include/linux/counter.h
+++ b/include/linux/counter.h

@@ -296,7 +296,8 @@ struct counter_ops {
 * @n_events_list_lock:	lock to protect Counter next events list operations
 * @events:	queue of detected Counter events
 * @events_wait:	wait queue to allow blocking reads of Counter events
- * @events_lock:	lock to protect Counter events queue read operations
+ * @events_in_lock:	lock to protect Counter events queue in operations
+ * @events_out_lock:	lock to protect Counter events queue out operations
 * @ops_exist_lock:	lock to prevent use during removal
 */
 struct counter_device {
@@ -323,7 +324,8 @@ struct counter_device {
 	struct mutex n_events_list_lock;
 	DECLARE_KFIFO_PTR(events, struct counter_event);
 	wait_queue_head_t events_wait;
-	struct mutex events_lock;
+	spinlock_t events_in_lock;
+	struct mutex events_out_lock;
 	struct mutex ops_exist_lock;
 };
 