sfc: do ARFS expiry work occasionally even without NAPI poll
If there's no traffic on a channel, its ARFS expiry work will never get scheduled by efx_poll() as that isn't being run. So make efx_filter_rfs_expire() reschedule itself to run after 30 seconds. Signed-off-by: Edward Cree <ecree@solarflare.com> Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
This commit (6fbc05e591, parent ca70bd423f) is contained in:
@ -355,7 +355,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
|
||||||
|
|
||||||
#ifdef CONFIG_RFS_ACCEL
|
#ifdef CONFIG_RFS_ACCEL
|
||||||
/* Perhaps expire some ARFS filters */
|
/* Perhaps expire some ARFS filters */
|
||||||
schedule_work(&channel->filter_work);
|
mod_delayed_work(system_wq, &channel->filter_work, 0);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* There is no race here; although napi_disable() will
|
/* There is no race here; although napi_disable() will
|
||||||
|
@ -487,7 +487,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_RFS_ACCEL
|
#ifdef CONFIG_RFS_ACCEL
|
||||||
INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
|
INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
rx_queue = &channel->rx_queue;
|
rx_queue = &channel->rx_queue;
|
||||||
|
@ -533,7 +533,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
|
||||||
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
|
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
|
||||||
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
|
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
|
||||||
#ifdef CONFIG_RFS_ACCEL
|
#ifdef CONFIG_RFS_ACCEL
|
||||||
INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
|
INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
return channel;
|
return channel;
|
||||||
|
@ -1994,7 +1994,7 @@ static void efx_remove_filters(struct efx_nic *efx)
|
||||||
struct efx_channel *channel;
|
struct efx_channel *channel;
|
||||||
|
|
||||||
efx_for_each_channel(channel, efx) {
|
efx_for_each_channel(channel, efx) {
|
||||||
flush_work(&channel->filter_work);
|
cancel_delayed_work_sync(&channel->filter_work);
|
||||||
kfree(channel->rps_flow_id);
|
kfree(channel->rps_flow_id);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -169,13 +169,17 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
|
||||||
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
|
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
|
||||||
static inline void efx_filter_rfs_expire(struct work_struct *data)
|
static inline void efx_filter_rfs_expire(struct work_struct *data)
|
||||||
{
|
{
|
||||||
struct efx_channel *channel = container_of(data, struct efx_channel,
|
struct delayed_work *dwork = to_delayed_work(data);
|
||||||
filter_work);
|
struct efx_channel *channel;
|
||||||
unsigned int time = jiffies - channel->rfs_last_expiry, quota;
|
unsigned int time, quota;
|
||||||
|
|
||||||
|
channel = container_of(dwork, struct efx_channel, filter_work);
|
||||||
|
time = jiffies - channel->rfs_last_expiry;
|
||||||
quota = channel->rfs_filter_count * time / (30 * HZ);
|
quota = channel->rfs_filter_count * time / (30 * HZ);
|
||||||
if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
|
if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
|
||||||
channel->rfs_last_expiry += time;
|
channel->rfs_last_expiry += time;
|
||||||
|
/* Ensure we do more work eventually even if NAPI poll is not happening */
|
||||||
|
schedule_delayed_work(dwork, 30 * HZ);
|
||||||
}
|
}
|
||||||
#define efx_filter_rfs_enabled() 1
|
#define efx_filter_rfs_enabled() 1
|
||||||
#else
|
#else
|
||||||
|
|
|
@ -501,7 +501,7 @@ struct efx_channel {
|
||||||
unsigned int rfs_expire_index;
|
unsigned int rfs_expire_index;
|
||||||
unsigned int n_rfs_succeeded;
|
unsigned int n_rfs_succeeded;
|
||||||
unsigned int n_rfs_failed;
|
unsigned int n_rfs_failed;
|
||||||
struct work_struct filter_work;
|
struct delayed_work filter_work;
|
||||||
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
|
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
|
||||||
u32 *rps_flow_id;
|
u32 *rps_flow_id;
|
||||||
#endif
|
#endif
|
||||||
|
|
Loading…
Reference in New Issue