ice: add flag to distinguish reset from .ndo_bpf in XDP rings config
[ Upstream commit 744d197162c2070a6045a71e2666ed93a57cc65d ] Commit 6624e780a5
("ice: split ice_vsi_setup into smaller functions") has placed ice_vsi_free_q_vectors() after ice_destroy_xdp_rings() in the rebuild process. The behaviour of the XDP rings config functions is context-dependent, so the change of order has led to ice_destroy_xdp_rings() doing additional work and removing XDP prog, when it was supposed to be preserved. Also, dependency on the PF state reset flags creates an additional, fortunately less common problem: * PFR is requested e.g. by tx_timeout handler * .ndo_bpf() is asked to delete the program, calls ice_destroy_xdp_rings(), but reset flag is set, so rings are destroyed without deleting the program * ice_vsi_rebuild tries to delete non-existent XDP rings, because the program is still on the VSI * system crashes With a similar race, when requested to attach a program, ice_prepare_xdp_rings() can actually skip setting the program in the VSI and nevertheless report success. Instead of reverting to the old order of function calls, add an enum argument to both ice_prepare_xdp_rings() and ice_destroy_xdp_rings() in order to distinguish between calls from rebuild and .ndo_bpf(). Fixes: efc2214b60
("ice: Add support for XDP") Reviewed-by: Igor Bagnucki <igor.bagnucki@intel.com> Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com> Reviewed-by: Simon Horman <horms@kernel.org> Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> Signed-off-by: Jacob Keller <jacob.e.keller@intel.com> Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-4-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
eab834acb4
commit
649b63f5da
|
@ -892,9 +892,16 @@ int ice_down(struct ice_vsi *vsi);
|
|||
int ice_down_up(struct ice_vsi *vsi);
|
||||
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
|
||||
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
|
||||
|
||||
enum ice_xdp_cfg {
|
||||
ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
|
||||
ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
|
||||
};
|
||||
|
||||
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
|
||||
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
|
||||
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
|
||||
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
|
||||
enum ice_xdp_cfg cfg_type);
|
||||
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
|
||||
int
|
||||
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
||||
u32 flags);
|
||||
|
|
|
@ -2462,7 +2462,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
|
|||
ret = ice_vsi_determine_xdp_res(vsi);
|
||||
if (ret)
|
||||
goto unroll_vector_base;
|
||||
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
|
||||
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
|
||||
ICE_XDP_CFG_PART);
|
||||
if (ret)
|
||||
goto unroll_vector_base;
|
||||
}
|
||||
|
@ -2613,7 +2614,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
|
|||
/* return value check can be skipped here, it always returns
|
||||
* 0 if reset is in progress
|
||||
*/
|
||||
ice_destroy_xdp_rings(vsi);
|
||||
ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
|
||||
|
||||
ice_vsi_clear_rings(vsi);
|
||||
ice_vsi_free_q_vectors(vsi);
|
||||
|
|
|
@ -2657,10 +2657,12 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
|
|||
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
|
||||
* @vsi: VSI to bring up Tx rings used by XDP
|
||||
* @prog: bpf program that will be assigned to VSI
|
||||
* @cfg_type: create from scratch or restore the existing configuration
|
||||
*
|
||||
* Return 0 on success and negative value on error
|
||||
*/
|
||||
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
|
||||
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
|
||||
enum ice_xdp_cfg cfg_type)
|
||||
{
|
||||
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
|
||||
int xdp_rings_rem = vsi->num_xdp_txq;
|
||||
|
@ -2736,7 +2738,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
|
|||
* taken into account at the end of ice_vsi_rebuild, where
|
||||
* ice_cfg_vsi_lan is being called
|
||||
*/
|
||||
if (ice_is_reset_in_progress(pf->state))
|
||||
if (cfg_type == ICE_XDP_CFG_PART)
|
||||
return 0;
|
||||
|
||||
/* tell the Tx scheduler that right now we have
|
||||
|
@ -2788,22 +2790,21 @@ err_map_xdp:
|
|||
/**
|
||||
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
|
||||
* @vsi: VSI to remove XDP rings
|
||||
* @cfg_type: disable XDP permanently or allow it to be restored later
|
||||
*
|
||||
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
|
||||
* resources
|
||||
*/
|
||||
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
|
||||
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
|
||||
{
|
||||
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
|
||||
struct ice_pf *pf = vsi->back;
|
||||
int i, v_idx;
|
||||
|
||||
/* q_vectors are freed in reset path so there's no point in detaching
|
||||
* rings; in case of rebuild being triggered not from reset bits
|
||||
* in pf->state won't be set, so additionally check first q_vector
|
||||
* against NULL
|
||||
* rings
|
||||
*/
|
||||
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
|
||||
if (cfg_type == ICE_XDP_CFG_PART)
|
||||
goto free_qmap;
|
||||
|
||||
ice_for_each_q_vector(vsi, v_idx) {
|
||||
|
@ -2844,7 +2845,7 @@ free_qmap:
|
|||
if (static_key_enabled(&ice_xdp_locking_key))
|
||||
static_branch_dec(&ice_xdp_locking_key);
|
||||
|
||||
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
|
||||
if (cfg_type == ICE_XDP_CFG_PART)
|
||||
return 0;
|
||||
|
||||
ice_vsi_assign_bpf_prog(vsi, NULL);
|
||||
|
@ -2955,7 +2956,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
|
|||
if (xdp_ring_err) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
|
||||
} else {
|
||||
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
|
||||
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
|
||||
ICE_XDP_CFG_FULL);
|
||||
if (xdp_ring_err)
|
||||
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
|
||||
}
|
||||
|
@ -2966,7 +2968,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
|
|||
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
|
||||
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
|
||||
xdp_features_clear_redirect_target(vsi->netdev);
|
||||
xdp_ring_err = ice_destroy_xdp_rings(vsi);
|
||||
xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
|
||||
if (xdp_ring_err)
|
||||
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
|
||||
/* reallocate Rx queues that were used for zero-copy */
|
||||
|
|
Loading…
Reference in New Issue