batman-adv: protect each hash row with rcu locks
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
parent a775eb847a
commit fb778ea173
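A minimal sketch of the per-row locking scheme this patch introduces: every hash row gets its own spinlock for writers, readers walk a row under RCU, and unlinked buckets are freed through call_rcu(). The structs below mirror hash.h after this change; sketch_add() is an illustrative helper, not a function from the patch.

/*
 * Sketch only: writers serialize on the lock of the affected row,
 * readers traverse the row under RCU.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct element_t {
        void *data;                     /* pointer to the data */
        struct hlist_node hlist;        /* bucket list pointer */
        struct rcu_head rcu;            /* deferred free handle */
};

struct hashtable_t {
        struct hlist_head *table;       /* the hashtable itself with the buckets */
        spinlock_t *list_locks;         /* spinlock for each hash list entry */
        int size;                       /* size of hashtable */
};

static void sketch_add(struct hashtable_t *hash, struct element_t *bucket,
                       int index)
{
        /* only the touched row is locked; other rows stay fully concurrent */
        spin_lock_bh(&hash->list_locks[index]);
        hlist_add_head_rcu(&bucket->hlist, &hash->table[index]);
        spin_unlock_bh(&hash->list_locks[index]);
}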
@@ -27,13 +27,16 @@ static void hash_init(struct hashtable_t *hash)
 {
         int i;
 
-        for (i = 0 ; i < hash->size; i++)
+        for (i = 0 ; i < hash->size; i++) {
                 INIT_HLIST_HEAD(&hash->table[i]);
+                spin_lock_init(&hash->list_locks[i]);
+        }
 }
 
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash)
 {
+        kfree(hash->list_locks);
         kfree(hash->table);
         kfree(hash);
 }
@@ -43,20 +46,33 @@ struct hashtable_t *hash_new(int size)
 {
         struct hashtable_t *hash;
 
-        hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC);
+        hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
 
         if (!hash)
                 return NULL;
 
-        hash->size = size;
         hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
+        if (!hash->table)
+                goto free_hash;
 
-        if (!hash->table) {
-                kfree(hash);
-                return NULL;
-        }
+        hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
+        if (!hash->list_locks)
+                goto free_table;
 
+        hash->size = size;
         hash_init(hash);
 
         return hash;
+
+free_table:
+        kfree(hash->table);
+free_hash:
+        kfree(hash);
+        return NULL;
+}
+
+void bucket_free_rcu(struct rcu_head *rcu)
+{
+        struct element_t *bucket;
+
+        bucket = container_of(rcu, struct element_t, rcu);
+        kfree(bucket);
 }
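bucket_free_rcu() above is the deferred-free half of the scheme: a writer unlinks a bucket with hlist_del_rcu() and hands it to call_rcu(), which runs the callback only after every reader that might still see the node has left its RCU read-side critical section. A sketch of that removal path, reusing the structures from hash.h (sketch_remove() itself is not in the patch):

static void sketch_remove(struct hashtable_t *hash, struct element_t *bucket,
                          int index)
{
        spin_lock_bh(&hash->list_locks[index]);
        hlist_del_rcu(&bucket->hlist);  /* readers may still be traversing it */
        spin_unlock_bh(&hash->list_locks[index]);

        /* freed via bucket_free_rcu() once all pre-existing readers are done */
        call_rcu(&bucket->rcu, bucket_free_rcu);
}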
@@ -39,10 +39,12 @@ typedef void (*hashdata_free_cb)(void *, void *);
 struct element_t {
         void *data;               /* pointer to the data */
         struct hlist_node hlist;  /* bucket list pointer */
+        struct rcu_head rcu;
 };
 
 struct hashtable_t {
-        struct hlist_head *table; /* the hashtable itself, with the buckets */
+        struct hlist_head *table; /* the hashtable itself with the buckets */
+        spinlock_t *list_locks;   /* spinlock for each hash list entry */
         int size;                 /* size of hashtable */
 };
 
@@ -52,6 +54,8 @@ struct hashtable_t *hash_new(int size);
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash);
 
+void bucket_free_rcu(struct rcu_head *rcu);
+
 /* remove the hash structure. if hashdata_free_cb != NULL, this function will be
  * called to remove the elements inside of the hash. if you don't remove the
  * elements, memory might be leaked. */
@@ -61,19 +65,22 @@ static inline void hash_delete(struct hashtable_t *hash,
         struct hlist_head *head;
         struct hlist_node *walk, *safe;
         struct element_t *bucket;
+        spinlock_t *list_lock; /* spinlock to protect write access */
         int i;
 
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
+                list_lock = &hash->list_locks[i];
 
-                hlist_for_each_safe(walk, safe, head) {
-                        bucket = hlist_entry(walk, struct element_t, hlist);
+                spin_lock_bh(list_lock);
+                hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
                         if (free_cb)
                                 free_cb(bucket->data, arg);
 
-                        hlist_del(walk);
-                        kfree(bucket);
+                        hlist_del_rcu(walk);
+                        call_rcu(&bucket->rcu, bucket_free_rcu);
                 }
+                spin_unlock_bh(list_lock);
         }
 
         hash_destroy(hash);
@@ -88,29 +95,39 @@ static inline int hash_add(struct hashtable_t *hash,
         struct hlist_head *head;
         struct hlist_node *walk, *safe;
         struct element_t *bucket;
+        spinlock_t *list_lock; /* spinlock to protect write access */
 
         if (!hash)
-                return -1;
+                goto err;
 
         index = choose(data, hash->size);
         head = &hash->table[index];
+        list_lock = &hash->list_locks[index];
 
-        hlist_for_each_safe(walk, safe, head) {
-                bucket = hlist_entry(walk, struct element_t, hlist);
+        rcu_read_lock();
+        hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
                 if (compare(bucket->data, data))
-                        return -1;
+                        goto err_unlock;
         }
+        rcu_read_unlock();
 
         /* no duplicate found in list, add new element */
         bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
 
         if (!bucket)
-                return -1;
+                goto err;
 
         bucket->data = data;
-        hlist_add_head(&bucket->hlist, head);
+
+        spin_lock_bh(list_lock);
+        hlist_add_head_rcu(&bucket->hlist, head);
+        spin_unlock_bh(list_lock);
 
         return 0;
+
+err_unlock:
+        rcu_read_unlock();
+err:
+        return -1;
 }
 
 /* removes data from hash, if found. returns pointer do data on success, so you
@@ -125,25 +142,31 @@ static inline void *hash_remove(struct hashtable_t *hash,
         struct hlist_node *walk;
         struct element_t *bucket;
         struct hlist_head *head;
-        void *data_save;
+        void *data_save = NULL;
 
         index = choose(data, hash->size);
         head = &hash->table[index];
 
+        spin_lock_bh(&hash->list_locks[index]);
         hlist_for_each_entry(bucket, walk, head, hlist) {
                 if (compare(bucket->data, data)) {
                         data_save = bucket->data;
-                        hlist_del(walk);
-                        kfree(bucket);
-                        return data_save;
+                        hlist_del_rcu(walk);
+                        call_rcu(&bucket->rcu, bucket_free_rcu);
+                        break;
                 }
         }
+        spin_unlock_bh(&hash->list_locks[index]);
 
-        return NULL;
+        return data_save;
 }
 
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
+/**
+ * finds data, based on the key in keydata. returns the found data on success,
+ * or NULL on error
+ *
+ * caller must lock with rcu_read_lock() / rcu_read_unlock()
+ **/
 static inline void *hash_find(struct hashtable_t *hash,
                               hashdata_compare_cb compare,
                               hashdata_choose_cb choose, void *keydata)
@@ -152,6 +175,7 @@ static inline void *hash_find(struct hashtable_t *hash,
         struct hlist_head *head;
         struct hlist_node *walk;
         struct element_t *bucket;
+        void *bucket_data = NULL;
 
         if (!hash)
                 return NULL;
@@ -159,13 +183,14 @@ static inline void *hash_find(struct hashtable_t *hash,
         index = choose(keydata , hash->size);
         head = &hash->table[index];
 
-        hlist_for_each(walk, head) {
-                bucket = hlist_entry(walk, struct element_t, hlist);
-                if (compare(bucket->data, keydata))
-                        return bucket->data;
+        hlist_for_each_entry(bucket, walk, head, hlist) {
+                if (compare(bucket->data, keydata)) {
+                        bucket_data = bucket->data;
+                        break;
+                }
         }
 
-        return NULL;
+        return bucket_data;
 }
 
 #endif /* _NET_BATMAN_ADV_HASH_H_ */
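The new comment on hash_find() above shifts the read-side locking to the caller: lookups must be bracketed by rcu_read_lock()/rcu_read_unlock(), which is exactly what the call sites in the hunks below now do. A sketch of that calling convention, assuming the types from hash.h (lookup_orig() is a hypothetical wrapper; in the driver the surrounding hash spinlock, e.g. orig_hash_lock, is what keeps the returned data valid after the unlock):

static void *lookup_orig(struct hashtable_t *hash, hashdata_compare_cb compare,
                         hashdata_choose_cb choose, void *key)
{
        void *data;

        rcu_read_lock();
        data = hash_find(hash, compare, choose, key);
        rcu_read_unlock();

        /* caller must still hold the lock that protects the data itself */
        return data;
}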
@@ -220,9 +220,11 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
                 goto dst_unreach;
 
         spin_lock_bh(&bat_priv->orig_hash_lock);
+        rcu_read_lock();
         orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
                                                    compare_orig, choose_orig,
                                                    icmp_packet->dst));
+        rcu_read_unlock();
 
         if (!orig_node)
                 goto unlock;
@@ -150,9 +150,11 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
         int size;
         int hash_added;
 
+        rcu_read_lock();
         orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
                                                    compare_orig, choose_orig,
                                                    addr));
+        rcu_read_unlock();
 
         if (orig_node)
                 return orig_node;
@@ -294,6 +296,7 @@ static void _purge_orig(struct bat_priv *bat_priv)
         struct hlist_node *walk, *safe;
         struct hlist_head *head;
         struct element_t *bucket;
+        spinlock_t *list_lock; /* spinlock to protect write access */
         struct orig_node *orig_node;
         int i;
 
@@ -305,22 +308,26 @@ static void _purge_orig(struct bat_priv *bat_priv)
         /* for all origins... */
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
+                list_lock = &hash->list_locks[i];
 
+                spin_lock_bh(list_lock);
                 hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
                         orig_node = bucket->data;
 
                         if (purge_orig_node(bat_priv, orig_node)) {
                                 if (orig_node->gw_flags)
                                         gw_node_delete(bat_priv, orig_node);
-                                hlist_del(walk);
-                                kfree(bucket);
+                                hlist_del_rcu(walk);
+                                call_rcu(&bucket->rcu, bucket_free_rcu);
                                 free_orig_node(orig_node, bat_priv);
+                                continue;
                         }
 
                         if (time_after(jiffies, orig_node->last_frag_packet +
                                                 msecs_to_jiffies(FRAG_TIMEOUT)))
                                 frag_list_free(&orig_node->frag_list);
                 }
+                spin_unlock_bh(list_lock);
         }
 
         spin_unlock_bh(&bat_priv->orig_hash_lock);
@@ -387,7 +394,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
 
-                hlist_for_each_entry(bucket, walk, head, hlist) {
+                rcu_read_lock();
+                hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
                         orig_node = bucket->data;
 
                         if (!orig_node->router)
@@ -408,17 +416,16 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
                                    neigh_node->addr,
                                    neigh_node->if_incoming->net_dev->name);
 
-                        rcu_read_lock();
                         hlist_for_each_entry_rcu(neigh_node, node,
                                                  &orig_node->neigh_list, list) {
                                 seq_printf(seq, " %pM (%3i)", neigh_node->addr,
                                            neigh_node->tq_avg);
                         }
-                        rcu_read_unlock();
 
                         seq_printf(seq, "\n");
                         batman_count++;
                 }
+                rcu_read_unlock();
         }
 
         spin_unlock_bh(&bat_priv->orig_hash_lock);
@@ -476,18 +483,21 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
 
-                hlist_for_each_entry(bucket, walk, head, hlist) {
+                rcu_read_lock();
+                hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
                         orig_node = bucket->data;
 
                         if (orig_node_add_if(orig_node, max_if_num) == -1)
                                 goto err;
                 }
+                rcu_read_unlock();
         }
 
         spin_unlock_bh(&bat_priv->orig_hash_lock);
         return 0;
 
 err:
+        rcu_read_unlock();
         spin_unlock_bh(&bat_priv->orig_hash_lock);
         return -ENOMEM;
 }
@@ -562,7 +572,8 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
 
-                hlist_for_each_entry(bucket, walk, head, hlist) {
+                rcu_read_lock();
+                hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
                         orig_node = bucket->data;
 
                         ret = orig_node_del_if(orig_node, max_if_num,
@@ -571,6 +582,7 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
                         if (ret == -1)
                                 goto err;
                 }
+                rcu_read_unlock();
         }
 
         /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
@@ -595,6 +607,7 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
         return 0;
 
 err:
+        rcu_read_unlock();
         spin_unlock_bh(&bat_priv->orig_hash_lock);
         return -ENOMEM;
 }
@@ -52,7 +52,8 @@ void slide_own_bcast_window(struct batman_if *batman_if)
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
 
-                hlist_for_each_entry(bucket, walk, head, hlist) {
+                rcu_read_lock();
+                hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
                         orig_node = bucket->data;
                         word_index = batman_if->if_num * NUM_WORDS;
                         word = &(orig_node->bcast_own[word_index]);
@@ -61,6 +62,7 @@ void slide_own_bcast_window(struct batman_if *batman_if)
                         orig_node->bcast_own_sum[batman_if->if_num] =
                                 bit_packet_count(word);
                 }
+                rcu_read_unlock();
         }
 
         spin_unlock_bh(&bat_priv->orig_hash_lock);
@@ -873,9 +875,11 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
         /* answer echo request (ping) */
         /* get routing information */
         spin_lock_bh(&bat_priv->orig_hash_lock);
+        rcu_read_lock();
         orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
                                                    compare_orig, choose_orig,
                                                    icmp_packet->orig));
+        rcu_read_unlock();
         ret = NET_RX_DROP;
 
         if ((orig_node) && (orig_node->router)) {
@@ -931,9 +935,11 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
 
         /* get routing information */
         spin_lock_bh(&bat_priv->orig_hash_lock);
+        rcu_read_lock();
         orig_node = ((struct orig_node *)
                      hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                                icmp_packet->orig));
+        rcu_read_unlock();
         ret = NET_RX_DROP;
 
         if ((orig_node) && (orig_node->router)) {
@@ -1023,9 +1029,11 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 
         /* get routing information */
         spin_lock_bh(&bat_priv->orig_hash_lock);
+        rcu_read_lock();
         orig_node = ((struct orig_node *)
                      hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                                icmp_packet->dst));
+        rcu_read_unlock();
 
         if ((orig_node) && (orig_node->router)) {
 
@@ -1094,9 +1102,11 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
                    router_orig->orig, ETH_ALEN) == 0) {
                 primary_orig_node = router_orig;
         } else {
+                rcu_read_lock();
                 primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
                                               choose_orig,
                                               router_orig->primary_addr);
+                rcu_read_unlock();
 
                 if (!primary_orig_node)
                         return orig_node->router;
@@ -1199,9 +1209,11 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
 
         /* get routing information */
         spin_lock_bh(&bat_priv->orig_hash_lock);
+        rcu_read_lock();
         orig_node = ((struct orig_node *)
                      hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                                unicast_packet->dest));
+        rcu_read_unlock();
 
         router = find_router(bat_priv, orig_node, recv_if);
 
@@ -1345,9 +1357,11 @@ int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
                 return NET_RX_DROP;
 
         spin_lock_bh(&bat_priv->orig_hash_lock);
+        rcu_read_lock();
         orig_node = ((struct orig_node *)
                      hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                                bcast_packet->orig));
+        rcu_read_unlock();
 
         if (!orig_node) {
                 spin_unlock_bh(&bat_priv->orig_hash_lock);
@@ -60,10 +60,12 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
         int required_bytes;
 
         spin_lock_bh(&bat_priv->hna_lhash_lock);
+        rcu_read_lock();
         hna_local_entry =
                 ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
                                                      compare_orig, choose_orig,
                                                      addr));
+        rcu_read_unlock();
         spin_unlock_bh(&bat_priv->hna_lhash_lock);
 
         if (hna_local_entry) {
@@ -116,9 +118,11 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
         /* remove address from global hash if present */
         spin_lock_bh(&bat_priv->hna_ghash_lock);
 
+        rcu_read_lock();
         hna_global_entry = ((struct hna_global_entry *)
                             hash_find(bat_priv->hna_global_hash,
                                       compare_orig, choose_orig, addr));
+        rcu_read_unlock();
 
         if (hna_global_entry)
                 _hna_global_del_orig(bat_priv, hna_global_entry,
@@ -252,9 +256,11 @@ void hna_local_remove(struct bat_priv *bat_priv,
 
         spin_lock_bh(&bat_priv->hna_lhash_lock);
 
+        rcu_read_lock();
         hna_local_entry = (struct hna_local_entry *)
                 hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
                           addr);
+        rcu_read_unlock();
 
         if (hna_local_entry)
                 hna_local_del(bat_priv, hna_local_entry, message);
@@ -334,9 +340,11 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                 spin_lock_bh(&bat_priv->hna_ghash_lock);
 
                 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
+                rcu_read_lock();
                 hna_global_entry = (struct hna_global_entry *)
                         hash_find(bat_priv->hna_global_hash, compare_orig,
                                   choose_orig, hna_ptr);
+                rcu_read_unlock();
 
                 if (!hna_global_entry) {
                         spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -368,9 +376,11 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                 spin_lock_bh(&bat_priv->hna_lhash_lock);
 
                 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
+                rcu_read_lock();
                 hna_local_entry = (struct hna_local_entry *)
                         hash_find(bat_priv->hna_local_hash, compare_orig,
                                   choose_orig, hna_ptr);
+                rcu_read_unlock();
 
                 if (hna_local_entry)
                         hna_local_del(bat_priv, hna_local_entry,
@@ -483,9 +493,11 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
 
         while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
                 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
+                rcu_read_lock();
                 hna_global_entry = (struct hna_global_entry *)
                         hash_find(bat_priv->hna_global_hash, compare_orig,
                                   choose_orig, hna_ptr);
+                rcu_read_unlock();
 
                 if ((hna_global_entry) &&
                     (hna_global_entry->orig_node == orig_node))
@@ -521,9 +533,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
         struct hna_global_entry *hna_global_entry;
 
         spin_lock_bh(&bat_priv->hna_ghash_lock);
+        rcu_read_lock();
         hna_global_entry = (struct hna_global_entry *)
                 hash_find(bat_priv->hna_global_hash,
                           compare_orig, choose_orig, addr);
+        rcu_read_unlock();
         spin_unlock_bh(&bat_priv->hna_ghash_lock);
 
         if (!hna_global_entry)
@@ -179,9 +179,11 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 
         *new_skb = NULL;
         spin_lock_bh(&bat_priv->orig_hash_lock);
+        rcu_read_lock();
         orig_node = ((struct orig_node *)
                      hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                                unicast_packet->orig));
+        rcu_read_unlock();
 
         if (!orig_node) {
                 pr_debug("couldn't find originator in orig_hash\n");
@@ -380,8 +380,10 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
                sizeof(struct vis_packet));
 
         memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
+        rcu_read_lock();
         old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
                              &search_elem);
+        rcu_read_unlock();
         kfree_skb(search_elem.skb_packet);
 
         if (old_info) {
@@ -540,7 +542,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
 
-                hlist_for_each_entry(bucket, walk, head, hlist) {
+                rcu_read_lock();
+                hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
                         orig_node = bucket->data;
                         if ((orig_node) && (orig_node->router) &&
                             (orig_node->flags & VIS_SERVER) &&
@@ -550,6 +553,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
                                        ETH_ALEN);
                         }
                 }
+                rcu_read_unlock();
         }
 
         return best_tq;
@@ -605,7 +609,8 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
 
-                hlist_for_each_entry(bucket, walk, head, hlist) {
+                rcu_read_lock();
+                hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
                         orig_node = bucket->data;
                         neigh_node = orig_node->router;
 
@@ -632,10 +637,12 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
                         packet->entries++;
 
                         if (vis_packet_full(info)) {
+                                rcu_read_unlock();
                                 spin_unlock_bh(&bat_priv->orig_hash_lock);
                                 return 0;
                         }
                 }
+                rcu_read_unlock();
         }
 
         spin_unlock_bh(&bat_priv->orig_hash_lock);
@@ -721,7 +728,8 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
         for (i = 0; i < hash->size; i++) {
                 head = &hash->table[i];
 
-                hlist_for_each_entry(bucket, walk, head, hlist) {
+                rcu_read_lock();
+                hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
                         orig_node = bucket->data;
 
                         /* if it's a vis server and reachable, send it. */
@@ -746,7 +754,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
 
                                 spin_lock_bh(&bat_priv->orig_hash_lock);
                         }
+                        rcu_read_unlock();
                 }
         }
 
         spin_unlock_bh(&bat_priv->orig_hash_lock);
@@ -763,9 +771,11 @@ static void unicast_vis_packet(struct bat_priv *bat_priv,
 
         spin_lock_bh(&bat_priv->orig_hash_lock);
         packet = (struct vis_packet *)info->skb_packet->data;
+        rcu_read_lock();
         orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
                                                    compare_orig, choose_orig,
                                                    packet->target_orig));
+        rcu_read_unlock();
 
         if ((!orig_node) || (!orig_node->router))
                 goto out;