RDMA/rdma_cm: Remove process_req and timer sorting
Now that the work queue is used directly to launch and track the work,
there is no need for a second processing function that walks "all list
entries". Just schedule all entries onto the main work queue directly.

We can also drop all of the useless list sorting now, as the workqueue
sorts by expiration time automatically.

This change requires switching the lock to a spinlock, as netdev
notifiers are called in an atomic context. This is now easy since the
lock no longer needs to be held across the lookup, which is already
single threaded due to the work queue.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 60cc43fc88
commit e19c0d2378
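For context, here is a minimal sketch of the pattern the patch moves to: each
request owns its own delayed_work, mod_delayed_work() lets the ordered
workqueue handle expiration ordering (so no sorted insert into the list is
needed), and a bh-spinlock protects the list so it can also be taken from
atomic notifier context. All names here (my_req, my_wq, my_lock, ...) are
hypothetical, not the identifiers from the patched file:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/jiffies.h>

static DEFINE_SPINLOCK(my_lock);        /* usable from atomic notifier context */
static LIST_HEAD(my_req_list);
static struct workqueue_struct *my_wq;  /* from alloc_ordered_workqueue(): one item runs at a time */

struct my_req {
        struct list_head list;
        unsigned long timeout;          /* absolute expiry, in jiffies */
        struct delayed_work work;       /* per-request work item */
};

static void my_set_timeout(struct my_req *req, unsigned long time)
{
        unsigned long delay = time - jiffies;

        if ((long)delay < 0)
                delay = 0;
        /* (Re)arm this request's own timer; the workqueue orders expiry. */
        mod_delayed_work(my_wq, &req->work, delay);
}

static void my_queue_req(struct my_req *req)
{
        spin_lock_bh(&my_lock);
        list_add_tail(&req->list, &my_req_list); /* no sorted insert needed */
        my_set_timeout(req, req->timeout);
        spin_unlock_bh(&my_lock);
}

With this shape, queueing is an O(1) list_add_tail() plus one
mod_delayed_work(), which is exactly the form queue_req() takes in the diff
below.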
drivers/infiniband/core/addr.c

@@ -68,11 +68,8 @@ struct addr_req {
 
 static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
 
-static void process_req(struct work_struct *work);
-
-static DEFINE_MUTEX(lock);
+static DEFINE_SPINLOCK(lock);
 static LIST_HEAD(req_list);
-static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
@@ -112,7 +109,7 @@ static void ib_nl_process_good_ip_rsp(const struct nlmsghdr *nlh)
 			memcpy(&gid, nla_data(curr), nla_len(curr));
 	}
 
-	mutex_lock(&lock);
+	spin_lock_bh(&lock);
 	list_for_each_entry(req, &req_list, list) {
 		if (nlh->nlmsg_seq != req->seq)
 			continue;
@@ -122,7 +119,7 @@ static void ib_nl_process_good_ip_rsp(const struct nlmsghdr *nlh)
 		found = 1;
 		break;
 	}
-	mutex_unlock(&lock);
+	spin_unlock_bh(&lock);
 
 	if (!found)
 		pr_info("Couldn't find request waiting for DGID: %pI6\n",
@@ -302,7 +299,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);
 
-static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
+static void set_timeout(struct addr_req *req, unsigned long time)
 {
 	unsigned long delay;
 
@@ -310,23 +307,15 @@ static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 	if ((long)delay < 0)
 		delay = 0;
 
-	mod_delayed_work(addr_wq, delayed_work, delay);
+	mod_delayed_work(addr_wq, &req->work, delay);
 }
 
 static void queue_req(struct addr_req *req)
 {
-	struct addr_req *temp_req;
-
-	mutex_lock(&lock);
-	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after_eq(req->timeout, temp_req->timeout))
-			break;
-	}
-
-	list_add(&req->list, &temp_req->list);
-
-	set_timeout(&req->work, req->timeout);
-	mutex_unlock(&lock);
+	spin_lock_bh(&lock);
+	list_add_tail(&req->list, &req_list);
+	set_timeout(req, req->timeout);
+	spin_unlock_bh(&lock);
 }
 
 static int ib_nl_fetch_ha(const struct dst_entry *dst,
@@ -584,7 +573,6 @@ static void process_one_req(struct work_struct *_work)
 	struct addr_req *req;
 	struct sockaddr *src_in, *dst_in;
 
-	mutex_lock(&lock);
 	req = container_of(_work, struct addr_req, work.work);
 
 	if (req->status == -ENODATA) {
@@ -596,13 +584,15 @@ static void process_one_req(struct work_struct *_work)
 			req->status = -ETIMEDOUT;
 		} else if (req->status == -ENODATA) {
 			/* requeue the work for retrying again */
-			set_timeout(&req->work, req->timeout);
-			mutex_unlock(&lock);
+			spin_lock_bh(&lock);
+			set_timeout(req, req->timeout);
+			spin_unlock_bh(&lock);
 			return;
 		}
 	}
+	spin_lock_bh(&lock);
 	list_del(&req->list);
-	mutex_unlock(&lock);
+	spin_unlock_bh(&lock);
 
 	/*
 	 * Although the work will normally have been canceled by the
@@ -619,47 +609,6 @@ static void process_one_req(struct work_struct *_work)
 	kfree(req);
 }
 
-static void process_req(struct work_struct *work)
-{
-	struct addr_req *req, *temp_req;
-	struct sockaddr *src_in, *dst_in;
-	struct list_head done_list;
-
-	INIT_LIST_HEAD(&done_list);
-
-	mutex_lock(&lock);
-	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status == -ENODATA) {
-			src_in = (struct sockaddr *) &req->src_addr;
-			dst_in = (struct sockaddr *) &req->dst_addr;
-			req->status = addr_resolve(src_in, dst_in, req->addr,
-						   true, req->seq);
-			if (req->status && time_after_eq(jiffies, req->timeout))
-				req->status = -ETIMEDOUT;
-			else if (req->status == -ENODATA) {
-				set_timeout(&req->work, req->timeout);
-				continue;
-			}
-		}
-		list_move_tail(&req->list, &done_list);
-	}
-
-	mutex_unlock(&lock);
-
-	list_for_each_entry_safe(req, temp_req, &done_list, list) {
-		list_del(&req->list);
-		/* It is safe to cancel other work items from this work item
-		 * because at a time there can be only one work item running
-		 * with this single threaded work queue.
-		 */
-		cancel_delayed_work(&req->work);
-		req->callback(req->status, (struct sockaddr *) &req->src_addr,
-			      req->addr, req->context);
-		put_client(req->client);
-		kfree(req);
-	}
-}
-
 int rdma_resolve_ip(struct rdma_addr_client *client,
 		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
 		    struct rdma_dev_addr *addr, int timeout_ms,
@@ -743,17 +692,16 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 {
 	struct addr_req *req, *temp_req;
 
-	mutex_lock(&lock);
+	spin_lock_bh(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_move(&req->list, &req_list);
-			set_timeout(&req->work, req->timeout);
+			set_timeout(req, req->timeout);
 			break;
 		}
 	}
-	mutex_unlock(&lock);
+	spin_unlock_bh(&lock);
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
@@ -810,11 +758,17 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 static int netevent_callback(struct notifier_block *self, unsigned long event,
 	void *ctx)
 {
+	struct addr_req *req;
+
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;
 
-		if (neigh->nud_state & NUD_VALID)
-			set_timeout(&work, jiffies);
+		if (neigh->nud_state & NUD_VALID) {
+			spin_lock_bh(&lock);
+			list_for_each_entry(req, &req_list, list)
+				set_timeout(req, jiffies);
+			spin_unlock_bh(&lock);
+		}
 	}
 	return 0;
 }
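The spinlock switch in the last hunk exists because netevent notifiers such as
netevent_callback() are invoked from atomic context (the commit message's
rationale), so the callback must not sleep: a mutex is off limits, while
spin_lock_bh() is safe. A small sketch of the notifier side follows;
my_netevent_cb and my_nb are hypothetical names, while NETEVENT_NEIGH_UPDATE
and register_netevent_notifier() are the real API from <net/netevent.h>:

#include <net/netevent.h>
#include <linux/notifier.h>

static int my_netevent_cb(struct notifier_block *self,
			  unsigned long event, void *ctx)
{
	/* Runs in atomic (softirq) context: no mutexes, no sleeping. */
	if (event == NETEVENT_NEIGH_UPDATE) {
		/* take spin_lock_bh(), walk the request list, rearm timers */
	}
	return 0;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netevent_cb,
};

/* e.g. from module init: register_netevent_notifier(&my_nb); */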