misc: fastrpc: Fix use-after-free and race in fastrpc_map_find

Currently, there is a race window between the point where the mutex is
unlocked in fastrpc_map_lookup and the point where the reference count is
taken (fastrpc_map_get) in fastrpc_map_find, which can also lead to a
use-after-free.

So let's merge fastrpc_map_find into fastrpc_map_lookup, which allows us
to protect both the maps list and the reference count by taking the
&fl->lock spinlock, since the spinlock is now released only after the
reference has been taken. Add a take_ref argument to make the lookup
suitable for all callers.
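
To illustrate the pattern being fixed, here is a minimal user-space sketch (an
analogue, not the driver code): lookup_then_get() models the old
fastrpc_map_lookup/fastrpc_map_find split, where the lock is dropped before the
reference is taken, while lookup_take_ref() models the merged helper that takes
the reference with the lock still held. The struct, function and variable names
here are hypothetical stand-ins; the real driver uses struct fastrpc_map, the
fl->lock spinlock and a kref rather than a pthread mutex and a plain counter.

/*
 * Minimal sketch of the race window and of the fix; names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct map {
	int fd;
	int refcount;
	struct map *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct map *maps;	/* head of the per-user map list */

/*
 * Old pattern: the lock is dropped before the caller takes a reference,
 * so another thread may drop the last reference and free the map in
 * between -- the window that caused the use-after-free.
 */
static struct map *lookup_then_get(int fd)
{
	struct map *m;

	pthread_mutex_lock(&list_lock);
	for (m = maps; m; m = m->next) {
		if (m->fd == fd) {
			pthread_mutex_unlock(&list_lock);
			/* <-- race window: m may already be freed here */
			m->refcount++;
			return m;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

/*
 * New pattern: the reference is taken while the lock is still held, so
 * the map cannot disappear between the lookup and the get.
 */
static struct map *lookup_take_ref(int fd, bool take_ref)
{
	struct map *m, *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (m = maps; m; m = m->next) {
		if (m->fd != fd)
			continue;
		if (take_ref)
			m->refcount++;	/* still under the lock */
		found = m;
		break;
	}
	pthread_mutex_unlock(&list_lock);
	return found;
}

int main(void)
{
	struct map m = { .fd = 3, .refcount = 1 };

	maps = &m;
	if (lookup_take_ref(3, true))
		printf("fd 3 found, refcount now %d\n", m.refcount);
	(void)lookup_then_get(3);	/* old, racy variant -- kept only for comparison */
	return 0;
}

With the reference taken under the lock, a concurrent put can no longer free
the map inside the lookup window, which is exactly what the take_ref argument
provides in the merged function below.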

Fixes: 8f6c1d8c4f ("misc: fastrpc: Add fdlist implementation")
Cc: stable <stable@kernel.org>
Co-developed-by: Ola Jeppsson <ola@snap.com>
Signed-off-by: Ola Jeppsson <ola@snap.com>
Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20221124174941.418450-2-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c

@@ -351,30 +351,31 @@ static void fastrpc_map_get(struct fastrpc_map *map)
 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
-			    struct fastrpc_map **ppmap)
+			    struct fastrpc_map **ppmap, bool take_ref)
 {
+	struct fastrpc_session_ctx *sess = fl->sctx;
 	struct fastrpc_map *map = NULL;
+	int ret = -ENOENT;
 
-	mutex_lock(&fl->mutex);
+	spin_lock(&fl->lock);
 	list_for_each_entry(map, &fl->maps, node) {
-		if (map->fd == fd) {
-			*ppmap = map;
-			mutex_unlock(&fl->mutex);
-			return 0;
+		if (map->fd != fd)
+			continue;
+
+		if (take_ref) {
+			ret = fastrpc_map_get(map);
+			if (ret) {
+				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+					__func__, fd, ret);
+				break;
+			}
 		}
+
+		*ppmap = map;
+		ret = 0;
+		break;
 	}
-	mutex_unlock(&fl->mutex);
-
-	return -ENOENT;
-}
-
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
-			    struct fastrpc_map **ppmap)
-{
-	int ret = fastrpc_map_lookup(fl, fd, ppmap);
-
-	if (!ret)
-		fastrpc_map_get(*ppmap);
+	spin_unlock(&fl->lock);
 
 	return ret;
 }
@@ -746,7 +747,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
 	struct fastrpc_map *map = NULL;
 	int err = 0;
 
-	if (!fastrpc_map_find(fl, fd, ppmap))
+	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
 		return 0;
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -1070,7 +1071,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
 		if (!fdlist[i])
 			break;
-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
 			fastrpc_map_put(mmap);
 	}