net: add napi_id and hash
Adds a napi_id and a hashing mechanism to look up a napi by id. This will be used by subsequent patches to implement low-latency Ethernet device polling.

Based on a code sample by Eric Dumazet.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6f00a02296
commit
af12fa6e46
|
@ -324,12 +324,15 @@ struct napi_struct {
|
|||
struct sk_buff *gro_list;
|
||||
struct sk_buff *skb;
|
||||
struct list_head dev_list;
|
||||
struct hlist_node napi_hash_node;
|
||||
unsigned int napi_id;
|
||||
};
|
||||
|
||||
/*
 * Bit numbers for napi_struct::state.  Manipulated with the atomic
 * test_and_set_bit()/test_and_clear_bit() family, so each entry is a
 * bit index, not a mask.
 */
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (added by this patch) */
};
|
||||
|
||||
enum gro_result {
|
||||
|
@ -445,6 +448,32 @@ static inline bool napi_reschedule(struct napi_struct *napi)
|
|||
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 * napi_by_id - lookup a NAPI by napi_id
 * @napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
extern struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 * napi_hash_add - add a NAPI to global hashtable
 * @napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
extern void napi_hash_add(struct napi_struct *napi);

/**
 * napi_hash_del - remove a NAPI from global table
 * @napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
extern void napi_hash_del(struct napi_struct *napi);
|
||||
|
||||
/**
|
||||
* napi_disable - prevent NAPI from scheduling
|
||||
* @n: napi context
|
||||
|
|
|
@ -129,6 +129,7 @@
|
|||
#include <linux/inetdevice.h>
|
||||
#include <linux/cpu_rmap.h>
|
||||
#include <linux/static_key.h>
|
||||
#include <linux/hashtable.h>
|
||||
|
||||
#include "net-sysfs.h"
|
||||
|
||||
|
@ -166,6 +167,12 @@ static struct list_head offload_base __read_mostly;
|
|||
DEFINE_RWLOCK(dev_base_lock);
|
||||
EXPORT_SYMBOL(dev_base_lock);
|
||||
|
||||
/* protects napi_hash addition/deletion and napi_gen_id */
|
||||
static DEFINE_SPINLOCK(napi_hash_lock);
|
||||
|
||||
static unsigned int napi_gen_id;
|
||||
static DEFINE_HASHTABLE(napi_hash, 8);
|
||||
|
||||
seqcount_t devnet_rename_seq;
|
||||
|
||||
static inline void dev_base_seq_inc(struct net *net)
|
||||
|
@ -4136,6 +4143,58 @@ void napi_complete(struct napi_struct *n)
|
|||
}
|
||||
EXPORT_SYMBOL(napi_complete);
|
||||
|
||||
/* must be called under rcu_read_lock(), as we dont take a reference */
|
||||
struct napi_struct *napi_by_id(unsigned int napi_id)
|
||||
{
|
||||
unsigned int hash = napi_id % HASH_SIZE(napi_hash);
|
||||
struct napi_struct *napi;
|
||||
|
||||
hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
|
||||
if (napi->napi_id == napi_id)
|
||||
return napi;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(napi_by_id);
|
||||
|
||||
void napi_hash_add(struct napi_struct *napi)
|
||||
{
|
||||
if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
|
||||
|
||||
spin_lock(&napi_hash_lock);
|
||||
|
||||
/* 0 is not a valid id, we also skip an id that is taken
|
||||
* we expect both events to be extremely rare
|
||||
*/
|
||||
napi->napi_id = 0;
|
||||
while (!napi->napi_id) {
|
||||
napi->napi_id = ++napi_gen_id;
|
||||
if (napi_by_id(napi->napi_id))
|
||||
napi->napi_id = 0;
|
||||
}
|
||||
|
||||
hlist_add_head_rcu(&napi->napi_hash_node,
|
||||
&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
|
||||
|
||||
spin_unlock(&napi_hash_lock);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(napi_hash_add);
|
||||
|
||||
/* Warning : caller is responsible to make sure rcu grace period
|
||||
* is respected before freeing memory containing @napi
|
||||
*/
|
||||
void napi_hash_del(struct napi_struct *napi)
|
||||
{
|
||||
spin_lock(&napi_hash_lock);
|
||||
|
||||
if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
|
||||
hlist_del_rcu(&napi->napi_hash_node);
|
||||
|
||||
spin_unlock(&napi_hash_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(napi_hash_del);
|
||||
|
||||
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
|
||||
int (*poll)(struct napi_struct *, int), int weight)
|
||||
{
|
||||
|
|
Loading…
Reference in New Issue