Merge branch 'rhash-cleanups'
NeilBrown says:

====================
A few rhashtable cleanups

2 patches fix documentation
1 fixes a bit in rhashtable_walk_start()
1 improves rhashtable_walk stability.

All reviewed and Acked.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5cb5ce3363
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -836,9 +836,8 @@ out:
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhashtable_insert_fast(
 	struct rhashtable *ht, struct rhash_head *obj,
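The new comment matches the actual behavior: the insert path never resizes inline; once residency passes roughly 70%, a rehash is deferred to a worker. For orientation, a minimal usage sketch of this API (the test_obj type and test_params are hypothetical, not part of this commit):

#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Hypothetical object type embedding the required rhash_head. */
struct test_obj {
	u32 key;
	struct rhash_head node;
	struct rcu_head rcu;	/* for kfree_rcu() on removal */
};

static const struct rhashtable_params test_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct test_obj, key),
	.head_offset	= offsetof(struct test_obj, node),
	.automatic_shrinking = true,
};

static int test_insert(struct rhashtable *ht, u32 key)
{
	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

	if (!obj)
		return -ENOMEM;
	obj->key = key;
	/* Safe in atomic context: a resize past ~70% residency is
	 * deferred to a workqueue, never performed here. */
	return rhashtable_insert_fast(ht, &obj->node, test_params);
}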
@@ -866,9 +865,8 @@ static inline int rhashtable_insert_fast(
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhltable_insert_key(
 	struct rhltable *hlt, const void *key, struct rhlist_head *list,
@@ -890,9 +888,8 @@ static inline int rhltable_insert_key(
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhltable_insert(
 	struct rhltable *hlt, struct rhlist_head *list,
@@ -922,9 +919,8 @@ static inline int rhltable_insert(
  *
  * It is safe to call this function from atomic context.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  */
 static inline int rhashtable_lookup_insert_fast(
 	struct rhashtable *ht, struct rhash_head *obj,
@@ -981,9 +977,8 @@ static inline void *rhashtable_lookup_get_insert_fast(
  *
  * Lookups may occur in parallel with hashtable mutations and resizing.
  *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
  *
  * Returns zero on success.
  */
@@ -1134,8 +1129,8 @@ static inline int __rhashtable_remove_fast(
  * walk the bucket chain upon removal. The removal operation is thus
  * considerably slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
  *
  * Returns zero on success, -ENOENT if the entry could not be found.
  */
@@ -1156,8 +1151,8 @@ static inline int rhashtable_remove_fast(
  * walk the bucket chain upon removal. The removal operation is thus
  * considerably slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
  *
  * Returns zero on success, -ENOENT if the entry could not be found.
  */
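Both removal comments now name the same 30% threshold; "if permitted" refers to the table having been created with automatic_shrinking set, as in the sketch above. A hedged sketch of the removal side, continuing the hypothetical test_obj example:

/* Remove and free an entry; assumes the test_obj/test_params sketch above. */
static int test_remove(struct rhashtable *ht, struct test_obj *obj)
{
	int err = rhashtable_remove_fast(ht, &obj->node, test_params);

	if (!err)
		/* RCU readers may still see the object; free after a grace period. */
		kfree_rcu(obj, rcu);
	return err;
}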
@@ -1273,8 +1268,9 @@ static inline int rhashtable_walk_init(struct rhashtable *ht,
  * For a completely stable walk you should construct your own data
  * structure outside the hash table.
  *
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
  *
  * You must call rhashtable_walk_exit after this function returns.
  */
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -668,8 +668,9 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * For a completely stable walk you should construct your own data
  * structure outside the hash table.
  *
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
  *
  * You must call rhashtable_walk_exit after this function returns.
  */
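The same wording change lands in the .c copy of the comment: rhashtable_walk_enter() no longer sleeps, so only softirq and hardirq contexts are excluded. For orientation, the usual enter/start/next/stop pattern (again using the hypothetical test_obj from earlier):

/* Walk every entry; errors from walk_next arrive as ERR_PTR values. */
static void test_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;

	rhashtable_walk_enter(ht, &iter);	/* any process context */
	rhashtable_walk_start(&iter);		/* takes rcu_read_lock() */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize hit; entries may repeat */
			break;
		}
		pr_info("saw key %u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);		/* drops rcu_read_lock() */
	rhashtable_walk_exit(&iter);
}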
@@ -726,6 +727,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
+	bool rhlist = ht->rhlist;
 
 	rcu_read_lock();
 
@@ -734,11 +736,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker.tbl && !iter->end_of_table) {
+	if (iter->end_of_table)
+		return 0;
+	if (!iter->walker.tbl) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		iter->slot = 0;
 		iter->skip = 0;
 		return -EAGAIN;
 	}
 
+	if (iter->p && !rhlist) {
+		/*
+		 * We need to validate that 'p' is still in the table, and
+		 * if so, update 'skip'
+		 */
+		struct rhash_head *p;
+		int skip = 0;
+		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+			skip++;
+			if (p == iter->p) {
+				iter->skip = skip;
+				goto found;
+			}
+		}
+		iter->p = NULL;
+	} else if (iter->p && rhlist) {
+		/* Need to validate that 'list' is still in the table, and
+		 * if so, update 'skip' and 'p'.
+		 */
+		struct rhash_head *p;
+		struct rhlist_head *list;
+		int skip = 0;
+		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+			for (list = container_of(p, struct rhlist_head, rhead);
+			     list;
+			     list = rcu_dereference(list->next)) {
+				skip++;
+				if (list == iter->list) {
+					iter->p = p;
+					iter->skip = skip;
+					goto found;
+				}
+			}
+		}
+		iter->p = NULL;
+	}
+found:
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
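This added block is what makes stop/start resumable: if the object the iterator was parked on (iter->p, or iter->list for rhltables) is still in its bucket, the walk recomputes 'skip' and continues from that object instead of replaying the bucket from the start. A sketch of the pattern this enables, walking in bounded chunks and sleeping between chunks (hypothetical, reusing test_obj):

#include <linux/delay.h>

/* Walk in chunks, dropping RCU (and thus allowing sleep) between chunks;
 * on restart the walk resumes at the parked object when it still exists. */
static void test_walk_chunked(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;
	int n = 0;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;			/* -EAGAIN: table was resized */
		if (++n % 16 == 0) {
			rhashtable_walk_stop(&iter);	/* releases rcu_read_lock() */
			msleep(1);			/* safe to sleep here */
			rhashtable_walk_start(&iter);	/* revalidates iter->p */
		}
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}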
@@ -914,8 +957,6 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 		iter->walker.tbl = NULL;
 	spin_unlock(&ht->lock);
 
-	iter->p = NULL;
-
 out:
 	rcu_read_unlock();
 }