RDMA/hns: Use mutex instead of spinlock for ida allocation

GFP_KERNEL may cause ida_alloc_range() to sleep, but this function is
currently called while holding a spinlock, where sleeping is not allowed.
Change the spinlock to a mutex so that the allocation is free to sleep.

Switching the allocation to GFP_ATOMIC instead is not suitable for the QP
allocation path, because atomic allocations have a non-negligible chance
of failing.

Fixes: 71586dd200 ("RDMA/hns: Create QP with selected QPN for bank load balance")
Link: https://lore.kernel.org/r/1611048513-28663-1-git-send-email-liweihang@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
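
The constraint behind this change, as a minimal sketch (the structure and
function names below are illustrative only, not taken from the hns driver):
ida_alloc_range() with GFP_KERNEL may sleep while allocating memory, so it
must not be called inside a spin_lock()/spin_unlock() critical section,
whereas a mutex-protected section is allowed to sleep.

/* Minimal sketch, assuming a made-up table type; not the hns driver code. */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

struct my_id_table {
	struct ida ida;
	struct mutex lock;	/* was a spinlock_t before a change like this one */
};

static int my_alloc_id(struct my_id_table *t, unsigned int max)
{
	int id;

	mutex_lock(&t->lock);	/* mutex critical sections may sleep */
	/*
	 * ida_alloc_range() with GFP_KERNEL may sleep while allocating
	 * memory; doing this under a held spinlock would be a bug.
	 */
	id = ida_alloc_range(&t->ida, 0, max, GFP_KERNEL);
	mutex_unlock(&t->lock);

	return id;	/* negative errno on failure, otherwise the allocated id */
}

In the driver itself the same lock also protects the per-bank "inuse"
counters used for bank load balancing, which is why the QPN allocation and
the counter update stay under one lock in the hunks below.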
Author:    Yangyang Li <liyangyang20@huawei.com>, 2021-01-19 17:28:33 +08:00
Committer: Jason Gunthorpe <jgg@nvidia.com>
Commit:    9293d3fcb7 (parent 9f206f7398)
2 changed files with 7 additions and 6 deletions

drivers/infiniband/hw/hns/hns_roce_device.h

@@ -532,7 +532,7 @@ struct hns_roce_qp_table {
 	struct hns_roce_hem_table sccc_table;
 	struct mutex scc_mutex;
 	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
-	spinlock_t bank_lock;
+	struct mutex bank_mutex;
 };
 
 struct hns_roce_cq_table {

drivers/infiniband/hw/hns/hns_roce_qp.c

@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
 		hr_qp->doorbell_qpn = 1;
 	} else {
-		spin_lock(&qp_table->bank_lock);
+		mutex_lock(&qp_table->bank_mutex);
 		bankid = get_least_load_bankid_for_qp(qp_table->bank);
 
 		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		if (ret) {
 			ibdev_err(&hr_dev->ib_dev,
 				  "failed to alloc QPN, ret = %d\n", ret);
-			spin_unlock(&qp_table->bank_lock);
+			mutex_unlock(&qp_table->bank_mutex);
 			return ret;
 		}
 
 		qp_table->bank[bankid].inuse++;
-		spin_unlock(&qp_table->bank_lock);
+		mutex_unlock(&qp_table->bank_mutex);
 
 		hr_qp->doorbell_qpn = (u32)num;
 	}
@@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
 	ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
 
-	spin_lock(&hr_dev->qp_table.bank_lock);
+	mutex_lock(&hr_dev->qp_table.bank_mutex);
 	hr_dev->qp_table.bank[bankid].inuse--;
-	spin_unlock(&hr_dev->qp_table.bank_lock);
+	mutex_unlock(&hr_dev->qp_table.bank_mutex);
 }
 
 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -1371,6 +1371,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 	unsigned int i;
 
 	mutex_init(&qp_table->scc_mutex);
+	mutex_init(&qp_table->bank_mutex);
 	xa_init(&hr_dev->qp_table_xa);
 
 	reserved_from_bot = hr_dev->caps.reserved_qps;