Merge tag 'mac80211-for-davem-2019-02-15' of git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211

Johannes Berg says:
====================
Just a few fixes this time:
 * mesh rhashtable fixes from Herbert
 * a small error path fix when starting AP interfaces
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a31687e85a
net/mac80211/cfg.c
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 		       BSS_CHANGED_P2P_PS |
 		       BSS_CHANGED_TXPOWER;
 	int err;
+	int prev_beacon_int;
 
 	old = sdata_dereference(sdata->u.ap.beacon, sdata);
 	if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 
 	sdata->needed_rx_chains = sdata->local->rx_chains;
 
+	prev_beacon_int = sdata->vif.bss_conf.beacon_int;
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
 	if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	if (!err)
 		ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 	mutex_unlock(&local->mtx);
-	if (err)
+	if (err) {
+		sdata->vif.bss_conf.beacon_int = prev_beacon_int;
 		return err;
+	}
 
 	/*
 	 * Apply control port protocol, this allows us to
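The three hunks above (ieee80211_start_ap() in net/mac80211/cfg.c) make the AP start error path restore the previous beacon interval: the old value is recorded before it is overwritten with params->beacon_interval, and written back if the following setup step fails. A minimal userspace sketch of the same save/restore pattern (illustrative names only, not the mac80211 code):

#include <stdio.h>

struct vif_conf {
	int beacon_int;	/* beacon interval currently programmed */
};

/* Pretend setup call that may fail; always fails here for demonstration. */
static int drv_start_ap(struct vif_conf *conf)
{
	(void)conf;
	return -1;
}

/* Apply a new beacon interval, restoring the old one if starting fails. */
static int start_ap(struct vif_conf *conf, int new_beacon_int)
{
	int prev_beacon_int = conf->beacon_int;
	int err;

	conf->beacon_int = new_beacon_int;
	err = drv_start_ap(conf);
	if (err) {
		conf->beacon_int = prev_beacon_int;	/* undo on failure */
		return err;
	}
	return 0;
}

int main(void)
{
	struct vif_conf conf = { .beacon_int = 100 };

	if (start_ap(&conf, 200))
		printf("start failed, beacon_int restored to %d\n", conf.beacon_int);
	return 0;
}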
net/mac80211/mesh.h
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
  * @dst: mesh path destination mac address
  * @mpp: mesh proxy mac address
  * @rhash: rhashtable list pointer
+ * @walk_list: linked list containing all mesh_path objects.
  * @gate_list: list pointer for known gates list
  * @sdata: mesh subif
  * @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@ struct mesh_path {
 	u8 dst[ETH_ALEN];
 	u8 mpp[ETH_ALEN];	/* used for MPP or MAP */
 	struct rhash_head rhash;
+	struct hlist_node walk_list;
 	struct hlist_node gate_list;
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@ struct mesh_path {
  * gate's mpath may or may not be resolved and active.
  * @gates_lock: protects updates to known_gates
  * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
  * @entries: number of entries in the table
  */
 struct mesh_table {
 	struct hlist_head known_gates;
 	spinlock_t gates_lock;
 	struct rhashtable rhead;
+	struct hlist_head walk_head;
+	spinlock_t walk_lock;
 	atomic_t entries;	/* Up to MAX_MESH_NEIGHBOURS */
 };
 
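Taken together, the mesh.h changes give every mesh_path two kinds of membership: the existing rhash_head for keyed lookup by destination, plus a new hlist_node (walk_list) for plain iteration, with the table gaining a matching walk_head list and walk_lock spinlock. A simplified userspace sketch of that dual-index idea (toy hash table and names of my own, not the kernel structures):

#include <stdio.h>
#include <string.h>

#define NBUCKETS 16

struct path {
	char dst[18];           /* lookup key (destination address as text) */
	struct path *hash_next; /* membership in the hash bucket (lookup index) */
	struct path *walk_next; /* membership in the walk list (iteration index) */
};

struct table {
	struct path *buckets[NBUCKETS]; /* keyed lookup */
	struct path *walk_head;         /* full traversal without a hash walk */
};

static unsigned int hash(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h % NBUCKETS;
}

static void insert(struct table *t, struct path *p)
{
	unsigned int b = hash(p->dst);

	p->hash_next = t->buckets[b];
	t->buckets[b] = p;          /* publish in the lookup index */
	p->walk_next = t->walk_head;
	t->walk_head = p;           /* publish in the iteration index */
}

static struct path *lookup(struct table *t, const char *dst)
{
	struct path *p;

	for (p = t->buckets[hash(dst)]; p; p = p->hash_next)
		if (strcmp(p->dst, dst) == 0)
			return p;
	return NULL;
}

int main(void)
{
	struct table t = { 0 };
	struct path a = { .dst = "02:00:00:00:00:01" };
	struct path b = { .dst = "02:00:00:00:00:02" };
	struct path *p;

	insert(&t, &a);
	insert(&t, &b);

	/* Iterating the walk list visits every entry without touching buckets. */
	for (p = t.walk_head; p; p = p->walk_next)
		printf("path to %s\n", p->dst);
	printf("lookup: %s\n", lookup(&t, "02:00:00:00:00:02") ? "hit" : "miss");
	return 0;
}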
net/mac80211/mesh_pathtbl.c
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
 		return NULL;
 
 	INIT_HLIST_HEAD(&newtbl->known_gates);
+	INIT_HLIST_HEAD(&newtbl->walk_head);
 	atomic_set(&newtbl->entries, 0);
 	spin_lock_init(&newtbl->gates_lock);
+	spin_lock_init(&newtbl->walk_lock);
 
 	return newtbl;
 }
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 static struct mesh_path *
 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 {
-	int i = 0, ret;
-	struct mesh_path *mpath = NULL;
-	struct rhashtable_iter iter;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return NULL;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	int i = 0;
+	struct mesh_path *mpath;
+
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (i++ == idx)
 			break;
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
 
-	if (IS_ERR(mpath) || !mpath)
+	if (!mpath)
 		return NULL;
 
 	if (mpath_expired(mpath)) {
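With the walk list in place, finding the idx-th path becomes a plain list traversal instead of an rhashtable walk, which needed init/start/stop/exit bookkeeping and had to tolerate -EAGAIN from the iterator. A small sketch of the list-based index lookup (illustrative types, not the kernel hlist API):

#include <stdio.h>

struct node {
	int value;
	struct node *next;
};

/* Return the idx-th node of a singly linked list, or NULL if it is shorter. */
static struct node *lookup_by_idx(struct node *head, int idx)
{
	struct node *n;
	int i = 0;

	for (n = head; n; n = n->next)
		if (i++ == idx)
			break;

	return n;	/* NULL when the list ended before reaching idx */
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *hit = lookup_by_idx(&a, 2);

	printf("idx 2 -> %d\n", hit ? hit->value : -1);	/* prints 3 */
	printf("idx 9 -> %s\n", lookup_by_idx(&a, 9) ? "found" : "NULL");
	return 0;
}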
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 		return ERR_PTR(-ENOMEM);
 
 	tbl = sdata->u.mesh.mesh_paths;
+	spin_lock_bh(&tbl->walk_lock);
 	do {
 		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 						    &new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 			mpath = rhashtable_lookup_fast(&tbl->rhead,
 						       dst,
 						       mesh_rht_params);
-
+		else if (!ret)
+			hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
 	} while (unlikely(ret == -EEXIST && !mpath));
+	spin_unlock_bh(&tbl->walk_lock);
 
-	if (ret && ret != -EEXIST)
-		return ERR_PTR(ret);
-
-	/* At this point either new_mpath was added, or we found a
-	 * matching entry already in the table; in the latter case
-	 * free the unnecessary new entry.
-	 */
-	if (ret == -EEXIST) {
+	if (ret) {
 		kfree(new_mpath);
+
+		if (ret != -EEXIST)
+			return ERR_PTR(ret);
+
 		new_mpath = mpath;
 	}
+
 	sdata->u.mesh.mesh_paths_generation++;
 	return new_mpath;
 }
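The reworked tail of mesh_path_add() frees the freshly allocated entry on any insertion failure and only falls back to the entry already in the table when the failure was -EEXIST; previously a non-EEXIST failure returned without freeing new_mpath. A simplified sketch of that insert-or-reuse pattern (toy single-slot table and helpers of my own, not the rhashtable API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int key; };

/* Toy single-slot "table": insertion fails with -EEXIST if the key is taken. */
static struct entry *slot;

static int table_insert(struct entry *e)
{
	if (slot) {
		if (slot->key == e->key)
			return -EEXIST;
		return -ENOMEM;	/* stand-in for any other failure */
	}
	slot = e;
	return 0;
}

static struct entry *table_lookup(int key)
{
	return (slot && slot->key == key) ? slot : NULL;
}

/* Add key, returning the entry now present in the table (new or existing). */
static struct entry *add_entry(int key)
{
	struct entry *new_e = malloc(sizeof(*new_e));
	struct entry *found = NULL;
	int ret;

	if (!new_e)
		return NULL;
	new_e->key = key;

	ret = table_insert(new_e);
	if (ret == -EEXIST)
		found = table_lookup(key);

	if (ret) {
		free(new_e);		/* never leak the unused allocation */
		if (ret != -EEXIST)
			return NULL;	/* hard failure */
		new_e = found;		/* reuse the entry already present */
	}
	return new_e;
}

int main(void)
{
	struct entry *a = add_entry(1);
	struct entry *b = add_entry(1);	/* duplicate: returns the same entry */

	printf("same entry: %s\n", (a == b) ? "yes" : "no");
	return 0;
}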
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
 	tbl = sdata->u.mesh.mpp_paths;
+
+	spin_lock_bh(&tbl->walk_lock);
 	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 					    &new_mpath->rhash,
 					    mesh_rht_params);
+	if (!ret)
+		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+	spin_unlock_bh(&tbl->walk_lock);
+
+	if (ret)
+		kfree(new_mpath);
 
 	sdata->u.mesh.mpp_paths_generation++;
 	return ret;
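mpp_path_add() now takes walk_lock around both the hashtable insert and the list add, so another writer can never observe the two indexes in a half-updated state, and it likewise frees the allocation when the insert fails; readers traverse the list under RCU, which is presumably why the _rcu list helper is used here. A rough userspace analogue of the writer-side locking (a pthread mutex standing in for the spinlock, illustrative only):

#include <pthread.h>
#include <stdio.h>

struct entry {
	int key;
	struct entry *walk_next;
};

static struct entry *walk_head;
static struct entry *lookup_slot;	/* toy stand-in for the hash table */
static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Publish into both "indexes" under one lock so writers never see a half state. */
static int add_entry(struct entry *e)
{
	int ret = 0;

	pthread_mutex_lock(&walk_lock);
	if (lookup_slot && lookup_slot->key == e->key) {
		ret = -1;		/* already present */
	} else {
		lookup_slot = e;	/* "hash table" insert */
		e->walk_next = walk_head;
		walk_head = e;		/* walk list insert */
	}
	pthread_mutex_unlock(&walk_lock);
	return ret;
}

int main(void)
{
	struct entry e = { .key = 7 };
	int first = add_entry(&e);
	int second = add_entry(&e);

	printf("first add: %d, duplicate add: %d\n", first, second);
	return 0;
}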
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	rcu_read_unlock();
 }
 
 static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
 
 static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
+	hlist_del_rcu(&mpath->walk_list);
 	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
 	mesh_path_free_rcu(tbl, mpath);
 }
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
-
+	struct hlist_node *n;
+
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta)
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
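mesh_path_flush_by_nexthop() (and the other flush helpers converted the same way below) now walks the list under walk_lock with hlist_for_each_entry_safe(), whose extra cursor remembers the next entry before the current one is unlinked and freed. A short sketch of why that saved pointer matters when deleting during traversal (plain C list, illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int drop;		/* whether this entry should be removed */
	struct node *next;
};

/* Remove all marked nodes: remember 'next' BEFORE freeing the current node. */
static void flush(struct node **head)
{
	struct node **pp = head;
	struct node *n, *tmp;

	for (n = *head; n; n = tmp) {
		tmp = n->next;		/* the "safe" part: cache the successor */
		if (n->drop) {
			*pp = tmp;	/* unlink */
			free(n);	/* n->next is unusable after this */
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL;
	int count = 0;

	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->drop = i & 1;	/* drop every other entry */
		n->next = head;
		head = n;
	}

	flush(&head);
	for (struct node *n = head; n; n = n->next)
		count++;
	printf("%d entries remain\n", count);	/* expect 2 */
	return 0;
}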
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 {
 	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
-
+	struct hlist_node *n;
+
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (ether_addr_equal(mpath->mpp, proxy))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void table_flush_by_iface(struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	struct hlist_node *n;
+
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 /**
@@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
 {
 	struct mesh_path *mpath;
 
-	rcu_read_lock();
+	spin_lock_bh(&tbl->walk_lock);
 	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
 	if (!mpath) {
 		rcu_read_unlock();
@@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
 	}
 
 	__mesh_path_del(tbl, mpath);
-	rcu_read_unlock();
+	spin_unlock_bh(&tbl->walk_lock);
 	return 0;
 }
 
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 			  struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
+	struct hlist_node *n;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
 		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
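The expiry walk keeps its time_after() test: with a free-running tick counter like jiffies, "has the deadline passed" must be computed as a signed difference so it stays correct across counter wraparound. A minimal sketch of that comparison (userspace stand-in mirroring the time_after() idea, not the kernel macro itself):

#include <stdio.h>

/* Wraparound-safe "a is after b" for a free-running 32-bit tick counter. */
static int time_after32(unsigned int a, unsigned int b)
{
	return (int)(b - a) < 0;
}

int main(void)
{
	unsigned int exp_time = 0xfffffff0u;	/* expiry set shortly before wrap */
	unsigned int now = 0x00000010u;		/* counter has wrapped past it */

	/* A naive 'now > exp_time' says "not expired"; the signed test is right. */
	printf("naive: %d, wrap-safe: %d\n",
	       now > exp_time, time_after32(now, exp_time));
	return 0;
}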