ice: split ice_aq_wait_for_event() func into two

Mitigate the race between registering on the wait list and receiving the AQ
response from FW: ice_aq_prep_for_event() should be called before sending an
AQ command, and ice_aq_wait_for_event() should be called after sending it, to
wait for the AQ response.

Please note that this was found by reading the code; an actual race has not
yet materialized.

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
commit fb9840c4ec
parent b214b98a7f
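For illustration, a minimal sketch of the new calling order (prep, send, wait), condensed from the ice_write_one_nvm_block() hunks in the diff below. The wrapper name ice_example_nvm_write() is hypothetical, it assumes the driver's internal headers (ice.h / ice_nvm.h), and most of the driver's setup and error reporting is omitted; only the ordering of the calls matters here.

/* Hypothetical wrapper, for illustration only; it mirrors the prep/send/wait
 * ordering that ice_write_one_nvm_block() uses after this change.
 */
static int ice_example_nvm_write(struct ice_pf *pf, u16 module, u32 offset,
				 u16 block_size, u8 *block, bool last_cmd)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_aq_task task;
	int err;

	/* Register on the AQ wait list *before* sending the command, so a
	 * firmware response that arrives immediately cannot be missed.
	 */
	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);

	err = ice_aq_update_nvm(hw, module, offset, block_size, block,
				last_cmd, 0, NULL);
	if (err)
		return err;	/* the real driver also logs and reports this */

	/* Sleep until the nvm_write completion arrives or the timeout hits;
	 * the wait also takes the task back off the wait list.
	 */
	return ice_aq_wait_for_event(pf, &task, 15 * HZ);
}

Compare this with the pre-patch flow, where the single ice_aq_wait_for_event() call both registered on the wait list and waited, leaving a window between sending the command and registering for its response.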
drivers/net/ethernet/intel/ice/ice.h

@@ -919,6 +919,7 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf);
 int ice_fdir_create_dflt_rules(struct ice_pf *pf);
 
 enum ice_aq_task_state {
+	ICE_AQ_TASK_NOT_PREPARED,
 	ICE_AQ_TASK_WAITING,
 	ICE_AQ_TASK_COMPLETE,
 	ICE_AQ_TASK_CANCELED,
@@ -931,8 +932,10 @@ struct ice_aq_task {
 	u16 opcode;
 };
 
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+			   u16 opcode);
 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
-			  u16 opcode, unsigned long timeout);
+			  unsigned long timeout);
 int ice_open(struct net_device *netdev);
 int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
drivers/net/ethernet/intel/ice/ice_fw_update.c

@@ -302,6 +302,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
 	dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
 		block_size, module, offset);
 
+	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);
+
 	err = ice_aq_update_nvm(hw, module, offset, block_size, block,
 				last_cmd, 0, NULL);
 	if (err) {
@@ -318,7 +320,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
 	 * is conservative and is intended to prevent failure to update when
 	 * firmware is slow to respond.
 	 */
-	err = ice_aq_wait_for_event(pf, &task, ice_aqc_opc_nvm_write, 15 * HZ);
+	err = ice_aq_wait_for_event(pf, &task, 15 * HZ);
 	if (err) {
 		dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
 			module, block_size, offset, err);
@@ -491,6 +493,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
 
 	devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
 
+	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_erase);
+
 	err = ice_aq_erase_nvm(hw, module, NULL);
 	if (err) {
 		dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
@@ -501,7 +505,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
 		goto out_notify_devlink;
 	}
 
-	err = ice_aq_wait_for_event(pf, &task, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ);
+	err = ice_aq_wait_for_event(pf, &task, ICE_FW_ERASE_TIMEOUT * HZ);
 	if (err) {
 		dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
 			component, module, err);
@@ -566,6 +570,8 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
 	u8 response_flags;
 	int err;
 
+	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);
+
 	err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
 	if (err) {
 		dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
@@ -590,8 +596,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
 		}
 	}
 
-	err = ice_aq_wait_for_event(pf, &task, ice_aqc_opc_nvm_write_activate,
-				    30 * HZ);
+	err = ice_aq_wait_for_event(pf, &task, 30 * HZ);
 	if (err) {
 		dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
 			err);
drivers/net/ethernet/intel/ice/ice_main.c

@@ -1251,30 +1251,24 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
 }
 
 /**
- * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
+ * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
  * @pf: pointer to the PF private structure
- * @task: ptr to task structure
+ * @task: intermediate helper storage and identifier for waiting
  * @opcode: the opcode to wait for
- * @timeout: how long to wait, in jiffies
  *
- * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
- * current thread will be put to sleep until the specified event occurs or
- * until the given timeout is reached.
+ * Prepares to wait for a specific AdminQ completion event on the ARQ for
+ * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
  *
- * To obtain only the descriptor contents, pass an event without an allocated
+ * Calls are separated to allow caller registering for event before sending
+ * the command, which mitigates a race between registering and FW responding.
+ *
+ * To obtain only the descriptor contents, pass an task->event with null
  * msg_buf. If the complete data buffer is desired, allocate the
- * event->msg_buf with enough space ahead of time.
- *
- * Returns: zero on success, or a negative error code on failure.
+ * task->event.msg_buf with enough space ahead of time.
  */
-int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
-			  u16 opcode, unsigned long timeout)
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+			   u16 opcode)
 {
-	struct device *dev = ice_pf_to_dev(pf);
-	unsigned long start;
-	long ret;
-	int err;
-
 	INIT_HLIST_NODE(&task->entry);
 	task->opcode = opcode;
 	task->state = ICE_AQ_TASK_WAITING;
@@ -1282,12 +1276,37 @@ int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
 	spin_lock_bh(&pf->aq_wait_lock);
 	hlist_add_head(&task->entry, &pf->aq_wait_list);
 	spin_unlock_bh(&pf->aq_wait_lock);
+}
 
-	start = jiffies;
+/**
+ * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @task: ptr prepared by ice_aq_prep_for_event()
+ * @timeout: how long to wait, in jiffies
+ *
+ * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
+ * current thread will be put to sleep until the specified event occurs or
+ * until the given timeout is reached.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+			  unsigned long timeout)
+{
+	enum ice_aq_task_state *state = &task->state;
+	struct device *dev = ice_pf_to_dev(pf);
+	unsigned long start = jiffies;
+	long ret;
+	int err;
 
-	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
+					       *state != ICE_AQ_TASK_WAITING,
 					       timeout);
-	switch (task->state) {
+	switch (*state) {
+	case ICE_AQ_TASK_NOT_PREPARED:
+		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
+		err = -EINVAL;
+		break;
 	case ICE_AQ_TASK_WAITING:
 		err = ret < 0 ? ret : -ETIMEDOUT;
 		break;
@@ -1298,7 +1317,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
 		err = ret < 0 ? ret : 0;
 		break;
 	default:
-		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+		WARN(1, "Unexpected AdminQ wait task state %u", *state);
 		err = -EINVAL;
 		break;
 	}
@@ -1306,7 +1325,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
 		jiffies_to_msecs(jiffies - start),
 		jiffies_to_msecs(timeout),
-		opcode);
+		task->opcode);
 
 	spin_lock_bh(&pf->aq_wait_lock);
 	hlist_del(&task->entry);
@@ -1342,7 +1361,9 @@ static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
 
 	spin_lock_bh(&pf->aq_wait_lock);
 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
-		if (task->state || task->opcode != opcode)
+		if (task->state != ICE_AQ_TASK_WAITING)
 			continue;
+		if (task->opcode != opcode)
+			continue;
 
 		task_ev = &task->event;