cachefiles: add spin_lock for cachefiles_ondemand_info
[ Upstream commit 0a790040838c736495d5afd6b2d636f159f817f1 ]
The following concurrency may cause a read request to fail to be completed
and result in a hang:

           t1             |             t2
---------------------------------------------------------
cachefiles_ondemand_copen
  req = xa_erase(&cache->reqs, id)
                             // Anon fd is maliciously closed.
                             cachefiles_ondemand_fd_release
                               xa_lock(&cache->reqs)
                               cachefiles_ondemand_set_object_close(object)
                               xa_unlock(&cache->reqs)
cachefiles_ondemand_set_object_open
  // No one will ever close it again.
cachefiles_ondemand_daemon_read
  cachefiles_ondemand_select_req
  // Get a read req but its fd is already closed.
  // The daemon can't issue a cread ioctl with a closed fd, so it hangs.
So add a spin_lock to cachefiles_ondemand_info to protect ondemand_id and
state. This lets cachefiles_ondemand_copen() check ondemand_id under the
lock to determine whether the fd has already been closed, avoiding the
problem above.
Fixes: c838305450 ("cachefiles: notify the user daemon when looking up cookie")
Signed-off-by: Baokun Li <libaokun1@huawei.com>
Link: https://lore.kernel.org/r/20240522114308.2402121-8-libaokun@huaweicloud.com
Acked-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
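
For illustration only, the sketch below is a minimal userspace C mock-up of the
pattern the patch applies, not the kernel code itself: the struct, the helper
names (ondemand_slot, slot_close(), slot_open()) and the use of a pthread
spinlock in place of the kernel spinlock_t are all hypothetical. It shows the
core idea: the close path and the open path take the same per-object lock, so
the open path can observe the CLOSED marker and fail with -EBADFD instead of
reopening an object whose anonymous fd is already gone.

/* Minimal userspace sketch of the locking pattern (hypothetical names). */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define SLOT_ID_CLOSED (-1)    /* stands in for CACHEFILES_ONDEMAND_ID_CLOSED */

enum slot_state { SLOT_CLOSE, SLOT_OPEN };

struct ondemand_slot {
        pthread_spinlock_t lock;        /* protects ondemand_id and state together */
        int ondemand_id;
        enum slot_state state;
};

/* Mirrors the fd-release path: mark the slot closed under the lock. */
static void slot_close(struct ondemand_slot *s)
{
        pthread_spin_lock(&s->lock);
        s->ondemand_id = SLOT_ID_CLOSED;
        s->state = SLOT_CLOSE;
        pthread_spin_unlock(&s->lock);
}

/* Mirrors the copen path: refuse to reopen a slot whose fd is gone. */
static int slot_open(struct ondemand_slot *s, int new_id)
{
        pthread_spin_lock(&s->lock);
        if (s->ondemand_id == SLOT_ID_CLOSED) {
                pthread_spin_unlock(&s->lock);
                return -EBADFD;         /* fd closed before copen: fail the request */
        }
        s->ondemand_id = new_id;
        s->state = SLOT_OPEN;
        pthread_spin_unlock(&s->lock);
        return 0;
}

int main(void)
{
        struct ondemand_slot s = { .ondemand_id = 42, .state = SLOT_OPEN };

        pthread_spin_init(&s.lock, PTHREAD_PROCESS_PRIVATE);
        slot_close(&s);                              /* t2: anon fd released      */
        printf("copen -> %d\n", slot_open(&s, 42));  /* t1: copen now sees CLOSED */
        pthread_spin_destroy(&s.lock);
        return 0;
}

Because the id and the state are only read or written with the lock held, the
open path can no longer race past a concurrent close and mark the object open
again, which is exactly what the kernel patch below enforces.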
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -55,6 +55,7 @@ struct cachefiles_ondemand_info {
         int ondemand_id;
         enum cachefiles_object_state state;
         struct cachefiles_object *object;
+        spinlock_t lock;
 };
 
 /*
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -10,13 +10,16 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
         struct cachefiles_object *object = file->private_data;
         struct cachefiles_cache *cache = object->volume->cache;
         struct cachefiles_ondemand_info *info = object->ondemand;
-        int object_id = info->ondemand_id;
+        int object_id;
         struct cachefiles_req *req;
         XA_STATE(xas, &cache->reqs, 0);
 
         xa_lock(&cache->reqs);
+        spin_lock(&info->lock);
+        object_id = info->ondemand_id;
         info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
         cachefiles_ondemand_set_object_close(object);
+        spin_unlock(&info->lock);
 
         /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
         xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
@@ -116,6 +119,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
 {
         struct cachefiles_req *req;
         struct fscache_cookie *cookie;
+        struct cachefiles_ondemand_info *info;
         char *pid, *psize;
         unsigned long id;
         long size;
@@ -166,6 +170,33 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
                 goto out;
         }
 
+        info = req->object->ondemand;
+        spin_lock(&info->lock);
+        /*
+         * The anonymous fd was closed before copen ? Fail the request.
+         *
+         *             t1             |             t2
+         * ---------------------------------------------------------
+         *                             cachefiles_ondemand_copen
+         *                             req = xa_erase(&cache->reqs, id)
+         * // Anon fd is maliciously closed.
+         * cachefiles_ondemand_fd_release
+         * xa_lock(&cache->reqs)
+         * cachefiles_ondemand_set_object_close(object)
+         * xa_unlock(&cache->reqs)
+         *                             cachefiles_ondemand_set_object_open
+         *                             // No one will ever close it again.
+         * cachefiles_ondemand_daemon_read
+         * cachefiles_ondemand_select_req
+         *
+         * Get a read req but its fd is already closed. The daemon can't
+         * issue a cread ioctl with an closed fd, then hung.
+         */
+        if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
+                spin_unlock(&info->lock);
+                req->error = -EBADFD;
+                goto out;
+        }
         cookie = req->object->cookie;
         cookie->object_size = size;
         if (size)
@@ -175,6 +206,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
         trace_cachefiles_ondemand_copen(req->object, id, size);
 
         cachefiles_ondemand_set_object_open(req->object);
+        spin_unlock(&info->lock);
         wake_up_all(&cache->daemon_pollwq);
 
 out:
@@ -552,6 +584,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
                 return -ENOMEM;
 
         object->ondemand->object = object;
+        spin_lock_init(&object->ondemand->lock);
         INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
         return 0;
 }