net/mlx5e: vxlan: Use RCU for vxlan table lookup

Remove the spinlock protecting the vxlan table and use RCU instead.
This will improve performance as it will eliminate contention on data
path cores.

Fixes: b3f63c3d5e ("net/mlx5e: Add netdev support for VXLAN tunneling")
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
This commit is contained in:
Saeed Mahameed 2020-05-15 17:09:05 -07:00
parent 185901ceeb
commit 7a64ca862a
1 changed file with 31 additions and 42 deletions

View File

@ -40,7 +40,6 @@
struct mlx5_vxlan {
struct mlx5_core_dev *mdev;
spinlock_t lock; /* protect vxlan table */
/* max_num_ports is usually 4, 16 buckets is more than enough */
DECLARE_HASHTABLE(htable, 4);
int num_ports;
@ -78,45 +77,46 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in);
}
/* Walk the hash bucket for @port and return the matching entry, or NULL.
 * Caller must hold vxlan->lock to keep the table stable during the walk.
 */
static struct mlx5_vxlan_port*
mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
{
	struct mlx5_vxlan_port *found = NULL, *cur;

	hash_for_each_possible(vxlan->htable, cur, hlist, port) {
		if (cur->udp_port == port) {
			found = cur;
			break;
		}
	}

	return found;
}
/* Datapath lookup of @port in the vxlan table.
 *
 * Lock-free: traverses the table under rcu_read_lock() via
 * hash_for_each_possible_rcu(), so no spinlock contention on data path
 * cores.  Returns the matching entry or NULL.
 *
 * NOTE(review): the pointer is returned after rcu_read_unlock(); it stays
 * valid only until mlx5_vxlan_del_port() drops the last reference and runs
 * synchronize_rcu() — callers are expected to treat it as a presence check
 * or hold their own reference.  TODO confirm against callers.
 */
struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
{
	struct mlx5_vxlan_port *retptr = NULL, *vxlanp;

	if (!mlx5_vxlan_allowed(vxlan))
		return NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port)
		if (vxlanp->udp_port == port) {
			retptr = vxlanp;
			break;
		}
	rcu_read_unlock();

	return retptr;
}
/* Plain (non-RCU) lookup of @port; returns the entry or NULL.
 * Both visible callers invoke this under vxlan->sync_lock, which
 * serializes it against table updates.
 */
static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
{
	struct mlx5_vxlan_port *entry;

	hash_for_each_possible(vxlan->htable, entry, hlist, port) {
		if (entry->udp_port == port)
			return entry;
	}

	return NULL;
}
/* Offload UDP @port for VXLAN, or take an extra reference if it is
 * already offloaded.  Updates are serialized by vxlan->sync_lock; the
 * table itself is published with hash_add_rcu() for lock-free readers.
 *
 * NOTE(review): this span is a diff rendering that interleaves removed
 * and added lines — e.g. `ret` is declared twice (old -ENOSPC init vs.
 * new 0 init), the pre-lock lookup/refcount_inc block coexists with the
 * locked one, and the old spin_lock_bh/hash_add pair appears next to the
 * new hash_add_rcu().  A hunk boundary also hides the allocation and
 * firmware add-port command between the capacity check and the
 * refcount_set().  The intended final code keeps only the mutex-protected
 * path with hash_add_rcu() — confirm against the applied commit.
 */
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
{
struct mlx5_vxlan_port *vxlanp;
int ret = -ENOSPC;
/* (removed by the commit) unlocked fast path: bump refcount if present */
vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
if (vxlanp) {
refcount_inc(&vxlanp->refcount);
return 0;
}
int ret = 0;
mutex_lock(&vxlan->sync_lock);
/* Under sync_lock: if the port is already offloaded, just add a ref. */
vxlanp = vxlan_lookup_port(vxlan, port);
if (vxlanp) {
refcount_inc(&vxlanp->refcount);
goto unlock;
}
/* Capacity check against the device's max offloaded UDP ports. */
if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
mlx5_core_info(vxlan->mdev,
"UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
@ -138,9 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
vxlanp->udp_port = port;
refcount_set(&vxlanp->refcount, 1);
/* (removed by the commit) spinlock-protected insert */
spin_lock_bh(&vxlan->lock);
hash_add(vxlan->htable, &vxlanp->hlist, port);
spin_unlock_bh(&vxlan->lock);
/* RCU-safe publish: visible to lock-free readers from this point on. */
hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);
vxlan->num_ports++;
mutex_unlock(&vxlan->sync_lock);
@ -157,34 +155,26 @@ unlock:
/* Drop one reference on offloaded UDP @port.  When the last reference is
 * released, unpublish the entry from the RCU hash table, wait for
 * concurrent lock-free lookups to drain, then remove the port from HW and
 * free the entry.
 *
 * Serialized against mlx5_vxlan_add_port() by vxlan->sync_lock.
 * Returns 0 on success or -ENOENT if @port is not in the table.
 */
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
{
	struct mlx5_vxlan_port *vxlanp;
	int ret = 0;

	mutex_lock(&vxlan->sync_lock);

	vxlanp = vxlan_lookup_port(vxlan, port);
	if (!vxlanp) {
		ret = -ENOENT;
		goto out_unlock;
	}

	if (refcount_dec_and_test(&vxlanp->refcount)) {
		/* Unlink first so new RCU readers cannot find the entry,
		 * then wait out readers already inside their read-side
		 * critical section before freeing it.
		 */
		hash_del_rcu(&vxlanp->hlist);
		synchronize_rcu();
		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
		kfree(vxlanp);
		vxlan->num_ports--;
	}

out_unlock:
	mutex_unlock(&vxlan->sync_lock);
	return ret;
}
@ -201,7 +191,6 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
vxlan->mdev = mdev;
mutex_init(&vxlan->sync_lock);
spin_lock_init(&vxlan->lock);
hash_init(vxlan->htable);
/* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */