iwlwifi: add a W/A for a scheduler hardware bug

If we need to move the scheduler write pointer by steps
of 0x40, 0x80 or 0xc0, the scheduler gets stuck. This
leads to hardware error interrupts with a status of
0x5A5A5A5A or similar.

To work around this, detect in the transport layer that
we are about to hit this case and tell iwlmvm to
increment the sequence number of the packets. This keeps
the WiFi sequence number in sync with the index in the
scheduler Tx queue, as required, while avoiding the
problematic write pointer step. It means that from time
to time we will start a queue from ssn + 1, but that
shouldn't be a problem: now that we have DQA, we keep
the same queue while toggling the AMPDU state instead of
switching to a new queue for AMPDU.

This bug has been fixed on 9000 devices and up.
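
As an illustration of the detection idea, here is a minimal
standalone sketch (not driver code; hits_scd_bug(), the 256-entry
queue size and the sample step values are assumptions of this note).
A single "non-zero multiple of 0x40" test on the write pointer step
is enough to flag exactly the 0x40, 0x80 and 0xc0 cases:

/*
 * Illustrative sketch only, not part of the patch. hits_scd_bug()
 * mirrors the kind of check described above, assuming a 256-entry
 * scheduler queue: a non-zero write-pointer step that is a multiple
 * of 0x40 is the case that gets the scheduler stuck, so the caller
 * would bump ssn by one to step over it.
 */
#include <stdbool.h>
#include <stdio.h>

static bool hits_scd_bug(unsigned int write_ptr, unsigned int ssn)
{
	unsigned int delta = (ssn - write_ptr) & 0xff;	/* 256 TFDs per queue */

	return delta && !(delta & 0x3f);	/* non-zero multiple of 0x40 */
}

int main(void)
{
	const unsigned int write_ptr = 0x20;
	const unsigned int steps[] = { 0x3f, 0x40, 0x41, 0x80, 0xc0, 0x100 };

	for (unsigned int i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		unsigned int ssn = write_ptr + steps[i];

		printf("step 0x%03x -> %s\n", steps[i],
		       hits_scd_bug(write_ptr, ssn) ?
		       "would get stuck, bump ssn" : "ok");
	}
	return 0;
}

Steps such as 0x3f, 0x41 or a full 0x100 wrap are left alone; only
the three problematic steps trigger the ssn bump that the op_mode
then mirrors in the packet's sequence number.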

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Emmanuel Grumbach 2017-05-07 15:00:31 +03:00 committed by Luca Coelho
parent 59df97f722
commit dcfbd67b4b
6 changed files with 71 additions and 33 deletions

@@ -482,7 +482,9 @@ struct iwl_trans_txq_scd_cfg {
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
* configuration may be %NULL, in which case the hardware will not be
- * configured. May sleep.
+ * configured. If true is returned, the operation mode needs to increment
+ * the sequence number of the packets routed to this queue because of a
+ * hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @txq_set_shared_mode: change Tx queue shared/unshared marking
@@ -544,7 +546,7 @@ struct iwl_trans_ops {
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
- void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
+ bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
@@ -952,7 +954,7 @@ static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
trans->ops->txq_disable(trans, queue, configure_scd);
}
- static inline void
+ static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout)
@@ -961,10 +963,11 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
+ return false;
}
- trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
+ return trans->ops->txq_enable(trans, queue, ssn,
+ cfg, queue_wdg_timeout);
}
static inline void

@@ -1744,7 +1744,7 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
}
/* hw scheduler queue config */
- void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,

@@ -758,7 +758,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
- bool shared_queue = false;
+ bool shared_queue = false, inc_ssn;
int ssn;
unsigned long tfd_queue_mask;
int ret;
@@ -885,8 +885,12 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
- wdg_timeout);
+ inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+ ssn, &cfg, wdg_timeout);
+ if (inc_ssn) {
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+ le16_add_cpu(&hdr->seq_ctrl, 0x10);
+ }
/*
* Mark queue as shared in transport if shared
@@ -898,6 +902,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
spin_lock_bh(&mvmsta->lock);
+ /*
+ * This looks racy, but it is not. We have only one packet for
+ * this ra/tid in our Tx path since we stop the Qdisc when we
+ * need to allocate a new TFD queue.
+ */
+ if (inc_ssn)
+ mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
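
A side note on the 0x10 arithmetic above: the 802.11 Sequence Control
field carries the fragment number in bits 0-3 and the sequence number
in bits 4-15, so adding 0x10 to seq_ctrl advances the sequence number
by exactly one. A small self-contained sketch (local macros standing
in for mac80211's IEEE80211_SCTL_SEQ/IEEE80211_SEQ_TO_SN(); the sample
values are made up for this note):

#include <stdint.h>
#include <stdio.h>

#define SCTL_SEQ	0xFFF0u			/* sequence number, bits 4-15 */
#define SCTL_FRAG	0x000Fu			/* fragment number, bits 0-3 */
#define SEQ_TO_SN(sc)	(((sc) & SCTL_SEQ) >> 4)

int main(void)
{
	uint16_t seq_ctrl = (57u << 4) | 2u;	/* SN 57, fragment 2 */

	printf("SN before: %u, frag: %u\n",
	       (unsigned int)SEQ_TO_SN(seq_ctrl),
	       (unsigned int)(seq_ctrl & SCTL_FRAG));
	/* host-endian analogue of le16_add_cpu(&hdr->seq_ctrl, 0x10) */
	seq_ctrl += 0x10;
	printf("SN after:  %u, frag: %u\n",
	       (unsigned int)SEQ_TO_SN(seq_ctrl),
	       (unsigned int)(seq_ctrl & SCTL_FRAG));
	return 0;
}

The fragment bits are untouched, which is why the patch can bump both
the per-TID seq_number counter and the frame's seq_ctrl by 0x10 and
keep the two in sync.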

@@ -757,35 +757,39 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
return queue;
}
- void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
+ return false;
/* Send the enabling command if we need to */
- if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid)) {
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = cfg->frame_limit,
- .sta_id = cfg->sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = cfg->fifo,
- .aggregate = cfg->aggregate,
- .tid = cfg->tid,
- };
+ if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid))
+ return false;
- iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
- wdg_timeout);
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
- sizeof(struct iwl_scd_txq_cfg_cmd),
- &cmd),
- "Failed to configure queue %d on FIFO %d\n", queue,
- cfg->fifo);
- }
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+ return inc_ssn;
}
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,

@@ -516,7 +516,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
- void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
+ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,

@@ -1277,13 +1277,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
- void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
int fifo = -1;
+ bool scd_bug = false;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
@@ -1324,6 +1325,23 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
ssn = txq->read_ptr;
}
} else {
+ /*
+ * If we need to move the SCD write pointer by steps of
+ * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let
+ * the op_mode know by returning true later.
+ * Do this only in case cfg is NULL since this trick can
+ * be done only if we have DQA enabled which is true for mvm
+ * only. And mvm never sets a cfg pointer.
+ * This is really ugly, but this is the easiest way out for
+ * this sad hardware issue.
+ * This bug has been fixed on devices 9000 and up.
+ */
+ scd_bug = !trans->cfg->mq_rx_supported &&
+ !((ssn - txq->write_ptr) & 0x3f) &&
+ (ssn != txq->write_ptr);
+ if (scd_bug)
+ ssn++;
}
/* Place first TFD at index corresponding to start sequence number.
@@ -1367,6 +1385,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
"Activate queue %d WrPtr: %d\n",
txq_id, ssn & 0xff);
}
+ return scd_bug;
}
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,