sfc: give ef10 its own rwsem in the filter table instead of filter_lock

efx->filter_lock remains in place for use on farch, but EF10 now ignores it.
EFX_EF10_FILTER_FLAG_BUSY is no longer needed, hence it is removed.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Edward Cree 2018-03-27 17:42:28 +01:00 committed by David S. Miller
parent 3af0f34290
commit c2bebe37c6
3 changed files with 152 additions and 214 deletions
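Every EF10 filter entry point now follows the same nesting: take efx->filter_sem for read to keep efx->filter_state from being freed or replaced, re-read the table pointer, then take the table's own rwsem for read (lookups) or write (insert/remove/restore). A minimal sketch of the write-side pattern, modelled on efx_ef10_filter_remove_safe() in the diff below (the function name of the sketch itself is illustrative only):

    /* Illustrative sketch only: shows the filter_sem -> table->lock nesting
     * this patch introduces; mirrors efx_ef10_filter_remove_safe() below.
     */
    static int ef10_filter_write_op_sketch(struct efx_nic *efx, u32 filter_id)
    {
        struct efx_ef10_filter_table *table;
        int rc;

        down_read(&efx->filter_sem);   /* pins efx->filter_state */
        table = efx->filter_state;
        down_write(&table->lock);      /* exclusive access to the entries */
        rc = efx_ef10_filter_remove_internal(efx, 1U << EFX_FILTER_PRI_MANUAL,
                                             filter_id, false);
        up_write(&table->lock);
        up_read(&efx->filter_sem);
        return rc;
    }

Read-only paths (get, count, get_rx_ids) take the inner semaphore with down_read() instead, so lookups can proceed concurrently while still excluding writers.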

drivers/net/ethernet/sfc/ef10.c

@@ -96,17 +96,15 @@ struct efx_ef10_filter_table {
 		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
 	unsigned int rx_match_count;
 
+	struct rw_semaphore lock; /* Protects entries */
 	struct {
 		unsigned long spec;	/* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
- * used to mark and sweep MAC filters for the device address lists.
- */
-#define EFX_EF10_FILTER_FLAG_BUSY	1UL
+/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
+/* unused flag	1UL */
 #define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
 #define EFX_EF10_FILTER_FLAGS		3UL
 		u64 handle;		/* firmware handle */
 	} *entry;
-	wait_queue_head_t waitq;
 /* Shadow of net_device address lists, guarded by mac_lock */
 	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
 	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
@@ -4302,26 +4300,33 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
 				  struct efx_filter_spec *spec,
 				  bool replace_equal)
 {
-	struct efx_ef10_filter_table *table = efx->filter_state;
 	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+	struct efx_ef10_filter_table *table;
 	struct efx_filter_spec *saved_spec;
 	struct efx_rss_context *ctx = NULL;
 	unsigned int match_pri, hash;
 	unsigned int priv_flags;
 	bool replacing = false;
+	unsigned int depth, i;
 	int ins_index = -1;
 	DEFINE_WAIT(wait);
 	bool is_mc_recip;
 	s32 rc;
 
+	down_read(&efx->filter_sem);
+	table = efx->filter_state;
+	down_write(&table->lock);
+
 	/* For now, only support RX filters */
 	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
-	    EFX_FILTER_FLAG_RX)
-		return -EINVAL;
+	    EFX_FILTER_FLAG_RX) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
 
 	rc = efx_ef10_filter_pri(table, spec);
 	if (rc < 0)
-		return rc;
+		goto out_unlock;
 	match_pri = rc;
 
 	hash = efx_ef10_filter_hash(spec);
@@ -4335,86 +4340,64 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
 				     &efx->rss_context.list);
 		else
 			ctx = &efx->rss_context;
-		if (!ctx)
-			return -ENOENT;
-		if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
-			return -EOPNOTSUPP;
+		if (!ctx) {
+			rc = -ENOENT;
+			goto out_unlock;
+		}
+		if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
+			rc = -EOPNOTSUPP;
+			goto out_unlock;
+		}
 	}
 
 	/* Find any existing filters with the same match tuple or
-	 * else a free slot to insert at.  If any of them are busy,
-	 * we have to wait and retry.
+	 * else a free slot to insert at.
 	 */
-	for (;;) {
-		unsigned int depth = 1;
-		unsigned int i;
-
-		spin_lock_bh(&efx->filter_lock);
-
-		for (;;) {
-			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
-			saved_spec = efx_ef10_filter_entry_spec(table, i);
-
-			if (!saved_spec) {
-				if (ins_index < 0)
-					ins_index = i;
-			} else if (efx_ef10_filter_equal(spec, saved_spec)) {
-				if (table->entry[i].spec &
-				    EFX_EF10_FILTER_FLAG_BUSY)
-					break;
-				if (spec->priority < saved_spec->priority &&
-				    spec->priority != EFX_FILTER_PRI_AUTO) {
-					rc = -EPERM;
-					goto out_unlock;
-				}
-				if (!is_mc_recip) {
-					/* This is the only one */
-					if (spec->priority ==
-					    saved_spec->priority &&
-					    !replace_equal) {
-						rc = -EEXIST;
-						goto out_unlock;
-					}
-					ins_index = i;
-					goto found;
-				} else if (spec->priority >
-					   saved_spec->priority ||
-					   (spec->priority ==
-					    saved_spec->priority &&
-					    replace_equal)) {
-					if (ins_index < 0)
-						ins_index = i;
-					else
-						__set_bit(depth, mc_rem_map);
-				}
-			}
-
-			/* Once we reach the maximum search depth, use
-			 * the first suitable slot or return -EBUSY if
-			 * there was none
-			 */
-			if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
-				if (ins_index < 0) {
-					rc = -EBUSY;
-					goto out_unlock;
-				}
-				goto found;
-			}
-
-			++depth;
-		}
-
-		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_bh(&efx->filter_lock);
-		schedule();
-	}
-
-found:
-	/* Create a software table entry if necessary, and mark it
-	 * busy.  We might yet fail to insert, but any attempt to
-	 * insert a conflicting filter while we're waiting for the
-	 * firmware must find the busy entry.
-	 */
+	for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+		saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+		if (!saved_spec) {
+			if (ins_index < 0)
+				ins_index = i;
+		} else if (efx_ef10_filter_equal(spec, saved_spec)) {
+			if (spec->priority < saved_spec->priority &&
+			    spec->priority != EFX_FILTER_PRI_AUTO) {
+				rc = -EPERM;
+				goto out_unlock;
+			}
+			if (!is_mc_recip) {
+				/* This is the only one */
+				if (spec->priority ==
+				    saved_spec->priority &&
+				    !replace_equal) {
+					rc = -EEXIST;
+					goto out_unlock;
+				}
+				ins_index = i;
+				break;
+			} else if (spec->priority >
+				   saved_spec->priority ||
+				   (spec->priority ==
+				    saved_spec->priority &&
+				    replace_equal)) {
+				if (ins_index < 0)
+					ins_index = i;
+				else
+					__set_bit(depth, mc_rem_map);
+			}
+		}
+	}
+
+	/* Once we reach the maximum search depth, use the first suitable
+	 * slot, or return -EBUSY if there was none
+	 */
+	if (ins_index < 0) {
+		rc = -EBUSY;
+		goto out_unlock;
+	}
+
+	/* Create a software table entry if necessary. */
 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
 	if (saved_spec) {
 		if (spec->priority == EFX_FILTER_PRI_AUTO &&
@@ -4438,28 +4421,13 @@ found:
 		*saved_spec = *spec;
 		priv_flags = 0;
 	}
-	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
-				  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
-
-	/* Mark lower-priority multicast recipients busy prior to removal */
-	if (is_mc_recip) {
-		unsigned int depth, i;
-
-		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
-			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
-			if (test_bit(depth, mc_rem_map))
-				table->entry[i].spec |=
-					EFX_EF10_FILTER_FLAG_BUSY;
-		}
-	}
-
-	spin_unlock_bh(&efx->filter_lock);
+	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
 
+	/* Actually insert the filter on the HW */
 	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
 				  ctx, replacing);
 
 	/* Finalise the software table entry */
-	spin_lock_bh(&efx->filter_lock);
 	if (rc == 0) {
 		if (replacing) {
 			/* Update the fields that may differ */
@@ -4475,6 +4443,12 @@ found:
 	} else if (!replacing) {
 		kfree(saved_spec);
 		saved_spec = NULL;
+	} else {
+		/* We failed to replace, so the old filter is still present.
+		 * Roll back the software table to reflect this.  In fact the
+		 * efx_ef10_filter_set_entry() call below will do the right
+		 * thing, so nothing extra is needed here.
+		 */
 	}
 	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
 
@@ -4496,7 +4470,6 @@ found:
 		priv_flags = efx_ef10_filter_entry_flags(table, i);
 
 		if (rc == 0) {
-			spin_unlock_bh(&efx->filter_lock);
 			MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
 				       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
 			MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
@@ -4504,15 +4477,12 @@ found:
 			rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
 					  inbuf, sizeof(inbuf),
 					  NULL, 0, NULL);
-			spin_lock_bh(&efx->filter_lock);
 		}
 
 		if (rc == 0) {
 			kfree(saved_spec);
 			saved_spec = NULL;
 			priv_flags = 0;
-		} else {
-			priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
 		}
 		efx_ef10_filter_set_entry(table, i, saved_spec,
 					  priv_flags);
@@ -4523,10 +4493,9 @@ found:
 	if (rc == 0)
 		rc = efx_ef10_make_filter_id(match_pri, ins_index);
 
-	wake_up_all(&table->waitq);
 out_unlock:
-	spin_unlock_bh(&efx->filter_lock);
-	finish_wait(&table->waitq, &wait);
+	up_write(&table->lock);
+	up_read(&efx->filter_sem);
 	return rc;
 }
 
@@ -4539,6 +4508,8 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
  * If !by_index, remove by ID
  * If by_index, remove by index
  * Filter ID may come from userland and must be range-checked.
+ * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
+ * for write.
  */
 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 					   unsigned int priority_mask,
@@ -4553,45 +4524,23 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 	DEFINE_WAIT(wait);
 	int rc;
 
-	/* Find the software table entry and mark it busy.  Don't
-	 * remove it yet; any attempt to update while we're waiting
-	 * for the firmware must find the busy entry.
-	 */
-	for (;;) {
-		spin_lock_bh(&efx->filter_lock);
-		if (!(table->entry[filter_idx].spec &
-		      EFX_EF10_FILTER_FLAG_BUSY))
-			break;
-		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_bh(&efx->filter_lock);
-		schedule();
-	}
-
 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
 	if (!spec ||
 	    (!by_index &&
 	     efx_ef10_filter_pri(table, spec) !=
-	     efx_ef10_filter_get_unsafe_pri(filter_id))) {
-		rc = -ENOENT;
-		goto out_unlock;
-	}
+	     efx_ef10_filter_get_unsafe_pri(filter_id)))
+		return -ENOENT;
 
 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
 	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
 		/* Just remove flags */
 		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
 		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
-		rc = 0;
-		goto out_unlock;
+		return 0;
 	}
 
-	if (!(priority_mask & (1U << spec->priority))) {
-		rc = -ENOENT;
-		goto out_unlock;
-	}
-
-	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
-	spin_unlock_bh(&efx->filter_lock);
+	if (!(priority_mask & (1U << spec->priority)))
+		return -ENOENT;
 
 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
 		/* Reset to an automatic filter */
@@ -4609,7 +4558,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 					  &efx->rss_context,
 					  true);
 
-		spin_lock_bh(&efx->filter_lock);
 		if (rc == 0)
 			*spec = new_spec;
 	} else {
@@ -4624,7 +4572,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
 					inbuf, sizeof(inbuf), NULL, 0, NULL);
 
-		spin_lock_bh(&efx->filter_lock);
 		if ((rc == 0) || (rc == -ENOENT)) {
 			/* Filter removed OK or didn't actually exist */
 			kfree(spec);
@@ -4636,11 +4583,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 		}
 	}
 
-	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
-	wake_up_all(&table->waitq);
-out_unlock:
-	spin_unlock_bh(&efx->filter_lock);
-	finish_wait(&table->waitq, &wait);
 	return rc;
 }
 
@@ -4648,17 +4590,33 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
 				       enum efx_filter_priority priority,
 				       u32 filter_id)
 {
-	return efx_ef10_filter_remove_internal(efx, 1U << priority,
-					       filter_id, false);
+	struct efx_ef10_filter_table *table;
+	int rc;
+
+	down_read(&efx->filter_sem);
+	table = efx->filter_state;
+	down_write(&table->lock);
+	rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
+					     false);
+	up_write(&table->lock);
+	up_read(&efx->filter_sem);
+	return rc;
 }
 
+/* Caller must hold efx->filter_sem for read */
 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
 					  enum efx_filter_priority priority,
 					  u32 filter_id)
 {
+	struct efx_ef10_filter_table *table = efx->filter_state;
+
 	if (filter_id == EFX_EF10_FILTER_ID_INVALID)
 		return;
-	efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
+
+	down_write(&table->lock);
+	efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
+					true);
+	up_write(&table->lock);
 }
 
 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -4666,11 +4624,13 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
 				    u32 filter_id, struct efx_filter_spec *spec)
 {
 	unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
-	struct efx_ef10_filter_table *table = efx->filter_state;
 	const struct efx_filter_spec *saved_spec;
+	struct efx_ef10_filter_table *table;
 	int rc;
 
-	spin_lock_bh(&efx->filter_lock);
+	down_read(&efx->filter_sem);
+	table = efx->filter_state;
+	down_read(&table->lock);
 	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
 	if (saved_spec && saved_spec->priority == priority &&
 	    efx_ef10_filter_pri(table, saved_spec) ==
@@ -4680,13 +4640,15 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
 	} else {
 		rc = -ENOENT;
 	}
-	spin_unlock_bh(&efx->filter_lock);
+	up_read(&table->lock);
+	up_read(&efx->filter_sem);
 	return rc;
 }
 
 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
 				    enum efx_filter_priority priority)
 {
+	struct efx_ef10_filter_table *table;
 	unsigned int priority_mask;
 	unsigned int i;
 	int rc;
@@ -4694,31 +4656,40 @@ static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
 	priority_mask = (((1U << (priority + 1)) - 1) &
 			 ~(1U << EFX_FILTER_PRI_AUTO));
 
+	down_read(&efx->filter_sem);
+	table = efx->filter_state;
+	down_write(&table->lock);
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
 		rc = efx_ef10_filter_remove_internal(efx, priority_mask,
 						     i, true);
 		if (rc && rc != -ENOENT)
-			return rc;
+			break;
+		rc = 0;
 	}
 
-	return 0;
+	up_write(&table->lock);
+	up_read(&efx->filter_sem);
+	return rc;
 }
 
 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
 					 enum efx_filter_priority priority)
 {
-	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_filter_table *table;
 	unsigned int filter_idx;
 	s32 count = 0;
 
-	spin_lock_bh(&efx->filter_lock);
+	down_read(&efx->filter_sem);
+	table = efx->filter_state;
+	down_read(&table->lock);
 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
 		if (table->entry[filter_idx].spec &&
 		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
 		    priority)
 			++count;
 	}
-	spin_unlock_bh(&efx->filter_lock);
+	up_read(&table->lock);
+	up_read(&efx->filter_sem);
 	return count;
 }
 
@@ -4733,12 +4704,15 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
 				      enum efx_filter_priority priority,
 				      u32 *buf, u32 size)
 {
-	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_filter_table *table;
 	struct efx_filter_spec *spec;
 	unsigned int filter_idx;
 	s32 count = 0;
 
-	spin_lock_bh(&efx->filter_lock);
+	down_read(&efx->filter_sem);
+	table = efx->filter_state;
+	down_read(&table->lock);
 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
 		if (spec && spec->priority == priority) {
@@ -4752,73 +4726,44 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
 					filter_idx);
 		}
 	}
-	spin_unlock_bh(&efx->filter_lock);
+	up_read(&table->lock);
+	up_read(&efx->filter_sem);
 	return count;
 }
 
 #ifdef CONFIG_RFS_ACCEL
 
-static void
-efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
-				    unsigned long filter_idx,
-				    int rc, efx_dword_t *outbuf,
-				    size_t outlen_actual);
-
 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
 					   unsigned int filter_idx)
 {
-	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_filter_table *table;
 	struct efx_filter_spec *spec;
-	MCDI_DECLARE_BUF(inbuf,
-			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
-			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
-	bool ret = true;
+	bool ret;
 
-	spin_lock_bh(&efx->filter_lock);
+	down_read(&efx->filter_sem);
+	table = efx->filter_state;
+	down_write(&table->lock);
 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
-	if (!spec ||
-	    (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
-	    spec->priority != EFX_FILTER_PRI_HINT ||
-	    !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
+
+	if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
+		ret = true;
+		goto out_unlock;
+	}
+
+	if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
 				 flow_id, filter_idx)) {
 		ret = false;
 		goto out_unlock;
 	}
 
-	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
-		       MC_CMD_FILTER_OP_IN_OP_REMOVE);
-	MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
-		       table->entry[filter_idx].handle);
-	if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
-			       efx_ef10_filter_rfs_expire_complete, filter_idx))
-		ret = false;
-	else
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+	ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
+					      filter_idx, true) == 0;
 out_unlock:
-	spin_unlock_bh(&efx->filter_lock);
+	up_write(&table->lock);
+	up_read(&efx->filter_sem);
 	return ret;
 }
 
-static void
-efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
-				    unsigned long filter_idx,
-				    int rc, efx_dword_t *outbuf,
-				    size_t outlen_actual)
-{
-	struct efx_ef10_filter_table *table = efx->filter_state;
-	struct efx_filter_spec *spec =
-		efx_ef10_filter_entry_spec(table, filter_idx);
-
-	spin_lock_bh(&efx->filter_lock);
-	if (rc == 0) {
-		kfree(spec);
-		efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
-	}
-	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
-	wake_up_all(&table->waitq);
-	spin_unlock_bh(&efx->filter_lock);
-}
-
 #endif /* CONFIG_RFS_ACCEL */
 
 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
@@ -5011,9 +4956,9 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
 	table->vlan_filter =
 		!!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
 	INIT_LIST_HEAD(&table->vlan_list);
+	init_rwsem(&table->lock);
 
 	efx->filter_state = table;
-	init_waitqueue_head(&table->waitq);
 
 	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
 		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
@@ -5055,7 +5000,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
 	if (!table)
 		return;
 
-	spin_lock_bh(&efx->filter_lock);
+	down_write(&table->lock);
 
 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
@@ -5093,15 +5038,11 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
 			}
 		}
 
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
-		spin_unlock_bh(&efx->filter_lock);
-
 		rc = efx_ef10_filter_push(efx, spec,
 					  &table->entry[filter_idx].handle,
 					  ctx, false);
 		if (rc)
 			failed++;
-		spin_lock_bh(&efx->filter_lock);
 
 		if (rc) {
 not_restored:
@@ -5113,13 +5054,10 @@ not_restored:
 			kfree(spec);
 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
-		} else {
-			table->entry[filter_idx].spec &=
-				~EFX_EF10_FILTER_FLAG_BUSY;
 		}
 	}
 
-	spin_unlock_bh(&efx->filter_lock);
+	up_write(&table->lock);
 
 	/* This can happen validly if the MC's capabilities have changed, so
 	 * is not an error.
 	 */
@@ -5187,6 +5125,8 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
 	struct efx_ef10_filter_table *table = efx->filter_state;
 	unsigned int filter_idx;
 
+	efx_rwsem_assert_write_locked(&table->lock);
+
 	if (*id != EFX_EF10_FILTER_ID_INVALID) {
 		filter_idx = efx_ef10_filter_get_unsafe_id(*id);
 		if (!table->entry[filter_idx].spec)
@@ -5222,10 +5162,10 @@ static void efx_ef10_filter_mark_old(struct efx_nic *efx)
 	struct efx_ef10_filter_table *table = efx->filter_state;
 	struct efx_ef10_filter_vlan *vlan;
 
-	spin_lock_bh(&efx->filter_lock);
+	down_write(&table->lock);
 	list_for_each_entry(vlan, &table->vlan_list, list)
 		_efx_ef10_filter_vlan_mark_old(efx, vlan);
-	spin_unlock_bh(&efx->filter_lock);
+	up_write(&table->lock);
 }
 
 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
@@ -5502,10 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
 	return rc;
 }
 
-/* Remove filters that weren't renewed.  Since nothing else changes the AUTO_OLD
- * flag or removes these filters, we don't need to hold the filter_lock while
- * scanning for these filters.
- */
+/* Remove filters that weren't renewed. */
 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
@@ -5514,6 +5451,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
 	int rc;
 	int i;
 
+	down_write(&table->lock);
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
 		if (READ_ONCE(table->entry[i].spec) &
 		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -5525,6 +5463,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
 				remove_failed++;
 		}
 	}
+	up_write(&table->lock);
 
 	if (remove_failed)
 		netif_info(efx, drv, efx->net_dev,

drivers/net/ethernet/sfc/efx.c

@@ -1783,7 +1783,6 @@ static int efx_probe_filters(struct efx_nic *efx)
 {
 	int rc;
 
-	spin_lock_init(&efx->filter_lock);
 	init_rwsem(&efx->filter_sem);
 	mutex_lock(&efx->mac_lock);
 	down_write(&efx->filter_sem);

drivers/net/ethernet/sfc/net_driver.h

@@ -843,7 +843,7 @@ struct efx_rss_context {
  * @loopback_mode: Loopback status
  * @loopback_modes: Supported loopback mode bitmask
  * @loopback_selftest: Offline self-test private state
- * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
  * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
  * @rps_mutex: Protects RPS state of all channels
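The mark-and-sweep path above (efx_ef10_filter_mark_one_old) now asserts its locking requirement with efx_rwsem_assert_write_locked(&table->lock), whose definition is not part of this diff. As an assumption about what such a debug helper checks, it could look roughly like this sketch:

    /* Hypothetical sketch only; the real helper lives elsewhere in the sfc
     * driver and may differ. A writer holding the rwsem makes the trylock
     * fail, so a successful trylock means the caller broke the rule.
     */
    static inline void efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
    {
        if (WARN_ON(down_read_trylock(sem)))
            up_read(sem);
    }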