Merge tag 'iwlwifi-for-kalle-2017-09-15' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-fixes

First set of fixes for 4.14:

* A couple of bugzilla bugs related to multicast handling;
* Two fixes for WoWLAN bugs that were causing queue hangs and re-initialization problems;
* Two fixes for potential uninitialized variable use reported by Dan Carpenter in relation to a recently introduced patch;
* A fix for buffer reordering in the newly supported 9000 device family;
* Fix a race when starting aggregation;
* Small fix for a recent patch to wake mac80211 queues;
* Send non-bufferable management frames in the generic queue so they are not sent on queues that are under power-save.
commit 4c707c04f6
@@ -2167,7 +2167,7 @@ out:
          * 1. We are not using a unified image
          * 2. We are using a unified image but had an error while exiting D3
          */
-        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+        set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
         set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
         /*
          * When switching images we return 1, which causes mac80211
@@ -1546,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
         struct iwl_mvm_mc_iter_data *data = _data;
         struct iwl_mvm *mvm = data->mvm;
         struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+        struct iwl_host_cmd hcmd = {
+                .id = MCAST_FILTER_CMD,
+                .flags = CMD_ASYNC,
+                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+        };
         int ret, len;

         /* if we don't have free ports, mcast frames will be dropped */
@@ -1560,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
         memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
         len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);

-        ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+        hcmd.len[0] = len;
+        hcmd.data[0] = cmd;
+
+        ret = iwl_mvm_send_cmd(mvm, &hcmd);
         if (ret)
                 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
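Together with the previous hunk, the multicast filter command is now built as an explicit struct iwl_host_cmd carrying IWL_HCMD_DFL_NOCOPY, so the transport references the long-lived filter buffer instead of copying the payload into the command, presumably because the address list can make the command large. A small userspace sketch of the copy-vs-reference distinction; fake_host_cmd, fake_send_cmd and COPY_LIMIT are invented for illustration and are not the driver API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DFL_NOCOPY 0x1        /* payload is referenced, not copied  */
#define COPY_LIMIT 320        /* pretend inline-copy size limit     */

struct fake_host_cmd {
        unsigned int id;
        const void *data;     /* payload pointer                    */
        size_t len;
        uint32_t dataflags;
};

/* Pretend transport: small payloads are copied inline; anything bigger
 * must stay in a caller-owned buffer and be sent with DFL_NOCOPY. */
static int fake_send_cmd(const struct fake_host_cmd *cmd)
{
        uint8_t inline_buf[COPY_LIMIT];

        if (cmd->dataflags & DFL_NOCOPY) {
                printf("cmd 0x%x: %zu bytes referenced in place\n",
                       cmd->id, cmd->len);
                return 0;
        }
        if (cmd->len > sizeof(inline_buf)) {
                printf("cmd 0x%x: %zu bytes, too big to copy inline\n",
                       cmd->id, cmd->len);
                return -1;
        }
        memcpy(inline_buf, cmd->data, cmd->len);
        printf("cmd 0x%x: %zu bytes copied inline\n", cmd->id, cmd->len);
        return 0;
}

int main(void)
{
        static uint8_t mcast_filter[1024];   /* long-lived filter buffer */
        struct fake_host_cmd cmd = {
                .id = 0xd0,
                .data = mcast_filter,
                .len = sizeof(mcast_filter),
                .dataflags = DFL_NOCOPY,     /* reference, don't copy    */
        };

        return fake_send_cmd(&cmd);
}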
@@ -1635,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
         if (!cmd)
                 goto out;

+        if (changed_flags & FIF_ALLMULTI)
+                cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+        if (cmd->pass_all)
+                cmd->count = 0;
+
         iwl_mvm_recalc_multicast(mvm);
 out:
         mutex_unlock(&mvm->mutex);
@@ -2563,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
                  * queues, so we should never get a second deferred
                  * frame for the RA/TID.
                  */
-                iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+                iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
                 ieee80211_free_txskb(mvm->hw, skb);
         }
 }
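The hunk above is the queue-wake fix from the pull request: iwl_mvm_start_mac_queues() takes a bitmap of mac80211 queues, so passing the raw index wakes the wrong queues (index 3 reads as bitmap 0b11, i.e. queues 0 and 1) and leaves the intended queue stopped. A minimal standalone sketch of the distinction; wake_queues() is a made-up stand-in, not the driver function:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Hypothetical stand-in: wakes every queue whose bit is set in 'bitmap'. */
static void wake_queues(unsigned long bitmap)
{
        for (unsigned int q = 0; q < 16; q++)
                if (bitmap & BIT(q))
                        printf("waking queue %u\n", q);
}

int main(void)
{
        unsigned int hw_queue = 3;

        wake_queues(hw_queue);       /* bug: wakes queues 0 and 1 */
        printf("---\n");
        wake_queues(BIT(hw_queue));  /* fix: wakes queue 3 only   */
        return 0;
}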
@@ -3975,6 +3989,43 @@ out_unlock:
         return ret;
 }

+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+        if (drop) {
+                if (iwl_mvm_has_new_tx_api(mvm))
+                        /* TODO new tx api */
+                        WARN_ONCE(1,
+                                  "Need to implement flush TX queue\n");
+                else
+                        iwl_mvm_flush_tx_path(mvm,
+                                iwl_mvm_flushable_queues(mvm) & queues,
+                                0);
+        } else {
+                if (iwl_mvm_has_new_tx_api(mvm)) {
+                        struct ieee80211_sta *sta;
+                        int i;
+
+                        mutex_lock(&mvm->mutex);
+
+                        for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+                                sta = rcu_dereference_protected(
+                                                mvm->fw_id_to_mac_id[i],
+                                                lockdep_is_held(&mvm->mutex));
+                                if (IS_ERR_OR_NULL(sta))
+                                        continue;
+
+                                iwl_mvm_wait_sta_queues_empty(mvm,
+                                                iwl_mvm_sta_from_mac80211(sta));
+                        }
+
+                        mutex_unlock(&mvm->mutex);
+                } else {
+                        iwl_trans_wait_tx_queues_empty(mvm->trans,
+                                                       queues);
+                }
+        }
+}
+
 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif, u32 queues, bool drop)
 {
@@ -3985,7 +4036,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
         int i;
         u32 msk = 0;

-        if (!vif || vif->type != NL80211_IFTYPE_STATION)
+        if (!vif) {
+                iwl_mvm_flush_no_vif(mvm, queues, drop);
+                return;
+        }
+
+        if (vif->type != NL80211_IFTYPE_STATION)
                 return;

         /* Make sure we're done with the deferred traffic before flushing */
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
             (lq_sta->tx_agg_tid_en & BIT(tid)) &&
             (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
                 IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
-                rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta);
+                if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+                        tid_data->state = IWL_AGG_QUEUED;
         }
 }

@@ -672,11 +672,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
          * If there was a significant jump in the nssn - adjust.
          * If the SN is smaller than the NSSN it might need to first go into
          * the reorder buffer, in which case we just release up to it and the
-         * rest of the function will take of storing it and releasing up to the
-         * nssn
+         * rest of the function will take care of storing it and releasing up to
+         * the nssn
          */
         if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
-                                buffer->buf_size)) {
+                                buffer->buf_size) ||
+            !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
                 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

                 iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
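This is the 9000-family reorder fix: frames are now also released when the incoming SN itself falls outside the head_sn .. head_sn + buf_size window, not only when the NSSN jumped. Since 802.11 sequence numbers are 12 bits, both comparisons are modular. A self-contained sketch of that modular window check; sn_less() follows my reading of mac80211's ieee80211_sn_less() and the numbers are arbitrary:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SN_MASK   0x0fff                 /* 802.11 SNs are 12 bits */
#define SN_MODULO (SN_MASK + 1)

static uint16_t sn_add(uint16_t a, uint16_t b)
{
        return (a + b) & SN_MASK;
}

/* Modular "a comes before b" test over the 4096-value sequence space. */
static bool sn_less(uint16_t a, uint16_t b)
{
        return ((b - a) & SN_MASK) < (SN_MODULO / 2) && a != b;
}

int main(void)
{
        uint16_t head_sn = 4090, buf_size = 64;
        uint16_t sn = 30;                /* numerically small, but ahead of
                                            head_sn because of the wrap   */

        /* In-window despite the wrap: head_sn <= sn < head_sn + buf_size
         * when evaluated modulo 4096. Prints "in window: 1". */
        printf("in window: %d\n",
               sn_less(sn, sn_add(head_sn, buf_size)) && !sn_less(sn, head_sn));
        return 0;
}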
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
         struct iwl_host_cmd cmd = {
                 .id = SCAN_OFFLOAD_ABORT_CMD,
         };
-        u32 status;
+        u32 status = CAN_ABORT_STATUS;

         ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
         if (ret)
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
 {
         struct iwl_mvm_add_sta_cmd cmd;
         int ret;
-        u32 status;
+        u32 status = ADD_STA_SUCCESS;

         lockdep_assert_held(&mvm->mutex);

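This hunk, the SCAN_OFFLOAD_ABORT_CMD hunk above and the CTDP status = 0 hunk below are the Dan Carpenter reports: the command path can return without writing the status out-parameter (for example when the command is silently skipped), so the caller would otherwise read stack garbage. A minimal sketch of the failure mode, with invented helper names and an assumed skip-without-write path:

#include <stdint.h>
#include <stdio.h>

#define ADD_STA_OK 0U

/* Pretend command helper: in one path the command is silently skipped
 * and *status is never written, which is the case the fix guards. */
static int send_cmd_status(int fw_dead, uint32_t *status)
{
        if (fw_dead)
                return 0;             /* "success", but *status untouched */
        *status = ADD_STA_OK;         /* normal path: reply copied out    */
        return 0;
}

static int add_sta(int fw_dead)
{
        uint32_t status = ADD_STA_OK; /* the fix: start from a sane value */
        int ret = send_cmd_status(fw_dead, &status);

        if (ret)
                return ret;
        return status == ADD_STA_OK ? 0 : -1;
}

int main(void)
{
        /* Without the initializer, the skipped-command case would compare
         * uninitialized memory; with it, the result is deterministic. */
        printf("skipped command -> ret %d\n", add_sta(1));
        printf("normal command  -> ret %d\n", add_sta(0));
        return 0;
}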
@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                 return -EINVAL;

-        if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
-                IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
+        if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+            mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+                IWL_ERR(mvm,
+                        "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
                         mvmsta->tid_data[tid].state);
                 return -ENXIO;
         }
@@ -281,6 +281,7 @@ struct iwl_mvm_vif;
  * These states relate to a specific RA / TID.
  *
  * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
  * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
  * @IWL_AGG_ON: aggregation session is up
  * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
@@ -290,6 +291,7 @@ struct iwl_mvm_vif;
  */
 enum iwl_mvm_agg_state {
         IWL_AGG_OFF = 0,
+        IWL_AGG_QUEUED,
         IWL_AGG_STARTING,
         IWL_AGG_ON,
         IWL_EMPTYING_HW_QUEUE_ADDBA,
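The IWL_AGG_QUEUED additions in the last two hunks, together with the rs_tl_turn_on_agg() and iwl_mvm_sta_tx_agg_start() changes above, close the aggregation-start race: once the start work has been queued the TID is no longer plain IWL_AGG_OFF, so a second start attempt is ignored, while the mac80211 callback still accepts the queued session. A compact sketch of that state progression as I read the hunks; the enum values mirror the driver's (IWL_ prefixes dropped) and the helpers are invented:

#include <stdio.h>

enum agg_state { AGG_OFF, AGG_QUEUED, AGG_STARTING, AGG_ON };

static enum agg_state state = AGG_OFF;

/* Rate-scaling side: only queue the aggregation start work once. */
static void try_turn_on_agg(void)
{
        if (state != AGG_OFF)
                return;                /* already queued or running        */
        state = AGG_QUEUED;            /* remember that the work is queued */
        printf("agg start work queued\n");
}

/* mac80211 callback side: accept a start from OFF or QUEUED only. */
static int tx_agg_start(void)
{
        if (state != AGG_QUEUED && state != AGG_OFF) {
                printf("rejected in state %d\n", state);
                return -1;
        }
        state = AGG_STARTING;
        return 0;
}

int main(void)
{
        try_turn_on_agg();
        try_turn_on_agg();             /* the race: a second attempt no-ops */
        if (tx_agg_start() == 0)
                state = AGG_ON;
        printf("final state %d\n", state);
        return 0;
}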
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)

         lockdep_assert_held(&mvm->mutex);

+        status = 0;
         ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
                                                        CTDP_CONFIG_CMD),
                                           sizeof(cmd), &cmd, &status);
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
         case NL80211_IFTYPE_AP:
         case NL80211_IFTYPE_ADHOC:
                 /*
-                 * Handle legacy hostapd as well, where station will be added
-                 * only just before sending the association response.
+                 * Non-bufferable frames use the broadcast station, thus they
+                 * use the probe queue.
                  * Also take care of the case where we send a deauth to a
                  * station that we don't have, or similarly an association
                  * response (with non-success status) for a station we can't
@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
                  * Also, disassociate frames might happen, particular with
                  * reason 7 ("Class 3 frame received from nonassociated STA").
                  */
-                if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
-                    ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) ||
-                    ieee80211_is_disassoc(fc))
+                if (ieee80211_is_mgmt(fc) &&
+                    (!ieee80211_is_bufferable_mmpdu(fc) ||
+                     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
                         return mvm->probe_queue;
                 if (info->hw_queue == info->control.vif->cab_queue)
                         return mvmvif->cab_queue;
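This last pair of hunks implements the "non-bufferable management frames" item: instead of whitelisting a few subtypes, any management frame that is not a bufferable MMPDU (plus deauth and disassoc, which formally are bufferable) now goes to the probe queue, which is not subject to power-save buffering. A standalone sketch of the resulting classification; the bufferable set (action, disassoc, deauth) follows my understanding of mac80211's ieee80211_is_bufferable_mmpdu() and all names below are stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum mgmt_subtype {
        MGMT_AUTH, MGMT_DEAUTH, MGMT_ASSOC_RESP, MGMT_DISASSOC,
        MGMT_PROBE_RESP, MGMT_ACTION, MGMT_BEACON,
};

/* "Bufferable management frames" per my reading of the mac80211 helper:
 * action, disassociation and deauthentication frames. */
static bool is_bufferable_mmpdu(enum mgmt_subtype st)
{
        return st == MGMT_ACTION || st == MGMT_DISASSOC || st == MGMT_DEAUTH;
}

/* New rule from the hunk: every management frame except action frames
 * ends up on the (never power-save-buffered) probe queue. */
static bool use_probe_queue(enum mgmt_subtype st)
{
        return !is_bufferable_mmpdu(st) ||
               st == MGMT_DEAUTH || st == MGMT_DISASSOC;
}

int main(void)
{
        const char *names[] = { "auth", "deauth", "assoc-resp", "disassoc",
                                "probe-resp", "action", "beacon" };

        for (int st = MGMT_AUTH; st <= MGMT_BEACON; st++)
                printf("%-10s -> %s\n", names[st],
                       use_probe_queue(st) ? "probe queue" : "regular queue");
        return 0;
}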