RDMA/core: Fix unsafe linked list traversal after failing to allocate CQ
It's not safe to access the next CQ in list_for_each_entry() after
invoking ib_free_cq(), because the CQ has already been freed in current
iteration. It should be replaced by list_for_each_entry_safe().
Fixes: c7ff819aef ("RDMA/core: Introduce shared CQ pool API")
Link: https://lore.kernel.org/r/1598963935-32335-1-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent 097a9d23b7
commit 8aa64be019
@@ -379,7 +379,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
 {
 	LIST_HEAD(tmp_list);
 	unsigned int nr_cqs, i;
-	struct ib_cq *cq;
+	struct ib_cq *cq, *n;
 	int ret;

 	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
@@ -412,7 +412,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
 	return 0;

 out_free_cqs:
-	list_for_each_entry(cq, &tmp_list, pool_entry) {
+	list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
 		cq->shared = false;
 		ib_free_cq(cq);
 	}
Loading…
Reference in New Issue