2010-03-17 08:41:22 +08:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* GPL LICENSE SUMMARY
|
|
|
|
*
|
2011-04-06 00:42:00 +08:00
|
|
|
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
2010-03-17 08:41:22 +08:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of version 2 of the GNU General Public License as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
|
|
|
* USA
|
|
|
|
*
|
|
|
|
* The full GNU General Public License is included in this distribution
|
|
|
|
* in the file called LICENSE.GPL.
|
|
|
|
*
|
|
|
|
* Contact Information:
|
|
|
|
* Intel Linux Wireless <ilw@linux.intel.com>
|
|
|
|
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
|
|
|
*
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/sched.h>
|
2011-08-26 14:11:00 +08:00
|
|
|
#include <linux/ieee80211.h>
|
2010-03-17 08:41:22 +08:00
|
|
|
|
|
|
|
#include "iwl-dev.h"
|
|
|
|
#include "iwl-core.h"
|
|
|
|
#include "iwl-sta.h"
|
|
|
|
#include "iwl-io.h"
|
2010-03-18 04:34:34 +08:00
|
|
|
#include "iwl-helpers.h"
|
2010-03-17 08:41:23 +08:00
|
|
|
#include "iwl-agn-hw.h"
|
2010-03-18 04:34:36 +08:00
|
|
|
#include "iwl-agn.h"
|
2011-07-03 16:22:15 +08:00
|
|
|
#include "iwl-trans.h"
|
2010-03-17 08:41:22 +08:00
|
|
|
|
2010-03-18 04:34:34 +08:00
|
|
|
/*
|
|
|
|
* mac80211 queues, ACs, hardware queues, FIFOs.
|
|
|
|
*
|
|
|
|
* Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
|
|
|
|
*
|
|
|
|
* Mac80211 uses the following numbers, which we get as from it
|
|
|
|
* by way of skb_get_queue_mapping(skb):
|
|
|
|
*
|
|
|
|
* VO 0
|
|
|
|
* VI 1
|
|
|
|
* BE 2
|
|
|
|
* BK 3
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* Regular (not A-MPDU) frames are put into hardware queues corresponding
|
|
|
|
* to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
|
|
|
|
* own queue per aggregation session (RA/TID combination), such queues are
|
|
|
|
* set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
|
|
|
|
* order to map frames to the right queue, we also need an AC->hw queue
|
|
|
|
* mapping. This is implemented here.
|
|
|
|
*
|
|
|
|
* Due to the way hw queues are set up (by the hw specific modules like
|
|
|
|
* iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
|
|
|
|
* mapping.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * 802.11 TID -> mac80211 access category lookup table.
 * Index is the QoS TID (0-7); value is one of the IEEE80211_AC_*
 * constants. Mapping follows the standard UP-to-AC rules:
 * TIDs 0,3 -> BE; 1,2 -> BK; 4,5 -> VI; 6,7 -> VO.
 * TIDs 8-15 are intentionally absent (see get_ac_from_tid()).
 */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
|
|
|
|
|
2010-04-15 06:35:14 +08:00
|
|
|
/*
 * Map a QoS TID (0-7) to its mac80211 access category.
 * Returns an IEEE80211_AC_* value, or -EINVAL for TIDs 8-15,
 * which are not supported yet.
 */
static inline int get_ac_from_tid(u16 tid)
{
	if (unlikely(tid >= ARRAY_SIZE(tid_to_ac)))
		return -EINVAL;	/* no support for TIDs 8-15 yet */

	return tid_to_ac[tid];
}
|
|
|
|
|
2010-08-23 16:46:51 +08:00
|
|
|
/*
 * Map a QoS TID (0-7) to the TX FIFO configured for its access
 * category in the given RXON context.
 * Returns the FIFO number, or -EINVAL for TIDs 8-15, which are
 * not supported yet.
 */
static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
	if (unlikely(tid >= ARRAY_SIZE(tid_to_ac)))
		return -EINVAL;	/* no support for TIDs 8-15 yet */

	return ctx->ac_to_fifo[tid_to_ac[tid]];
}
|
|
|
|
|
2011-07-10 15:47:01 +08:00
|
|
|
static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
|
|
|
|
int tid)
|
2010-03-17 08:41:22 +08:00
|
|
|
{
|
2010-03-17 08:41:23 +08:00
|
|
|
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
|
2010-10-06 23:10:00 +08:00
|
|
|
(IWLAGN_FIRST_AMPDU_QUEUE +
|
|
|
|
priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
|
2010-03-17 08:41:22 +08:00
|
|
|
IWL_WARN(priv,
|
|
|
|
"queue number out of range: %d, must be %d to %d\n",
|
2010-03-17 08:41:23 +08:00
|
|
|
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
|
|
|
|
IWLAGN_FIRST_AMPDU_QUEUE +
|
2010-10-06 23:10:00 +08:00
|
|
|
priv->cfg->base_params->num_of_ampdu_queues - 1);
|
2010-03-17 08:41:22 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Modify device's station table to Tx this TID */
|
2011-03-16 02:33:03 +08:00
|
|
|
return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
|
|
|
|
}
|
|
|
|
|
2011-07-08 23:46:29 +08:00
|
|
|
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
|
|
|
|
struct ieee80211_tx_info *info,
|
|
|
|
__le16 fc, __le32 *tx_flags)
|
|
|
|
{
|
|
|
|
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
|
|
|
|
info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
|
|
|
|
info->flags & IEEE80211_TX_CTL_AMPDU)
|
|
|
|
*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
|
|
|
|
}
|
|
|
|
|
2010-03-18 04:34:34 +08:00
|
|
|
/*
 * Fill in the "basic" (rate-independent) fields of a REPLY_TX command
 * from the outgoing frame: ACK policy, BA/TSF flags, BT-coex override,
 * fragmentation, TID, protection, and PM frame timeout.
 * Works on a local copy of tx_flags and writes it back at the end.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Request an ACK unless mac80211 explicitly marked the frame no-ACK */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		/* probe responses need the uCode to fill in the TSF */
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		/* BAR: expect an immediate block-ack response */
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
		 priv->cfg->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
		/*
		 * On 2.4 GHz with advanced BT coexistence, connection
		 * setup frames (auth/assoc/EAPOL) must go out even if
		 * BT is active, so tell the uCode to ignore BT.
		 */
		tx_flags |= TX_CMD_FLG_IGNORE_BT;


	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: pass the TID; uCode manages the sequence number */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		/* non-QoS: driver-supplied sequence control is used */
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_build_basic(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/*
		 * PM frame timeout keeps the device awake long enough
		 * for the expected response; (re)assoc responses can
		 * take longer, hence the larger value.
		 */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
|
|
|
|
|
|
|
|
#define RTS_DFAULT_RETRY_LIMIT 60
|
|
|
|
|
|
|
|
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
|
2011-07-24 01:24:47 +08:00
|
|
|
struct iwl_tx_cmd *tx_cmd,
|
|
|
|
struct ieee80211_tx_info *info,
|
|
|
|
__le16 fc)
|
2010-03-18 04:34:34 +08:00
|
|
|
{
|
|
|
|
u32 rate_flags;
|
|
|
|
int rate_idx;
|
|
|
|
u8 rts_retry_limit;
|
|
|
|
u8 data_retry_limit;
|
|
|
|
u8 rate_plcp;
|
|
|
|
|
|
|
|
/* Set retry limit on DATA packets and Probe Responses*/
|
|
|
|
if (ieee80211_is_probe_resp(fc))
|
|
|
|
data_retry_limit = 3;
|
|
|
|
else
|
2010-03-24 02:37:59 +08:00
|
|
|
data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
|
2010-03-18 04:34:34 +08:00
|
|
|
tx_cmd->data_retry_limit = data_retry_limit;
|
|
|
|
|
|
|
|
/* Set retry limit on RTS packets */
|
|
|
|
rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
|
|
|
|
if (data_retry_limit < rts_retry_limit)
|
|
|
|
rts_retry_limit = data_retry_limit;
|
|
|
|
tx_cmd->rts_retry_limit = rts_retry_limit;
|
|
|
|
|
|
|
|
/* DATA packets will use the uCode station table for rate/antenna
|
|
|
|
* selection */
|
|
|
|
if (ieee80211_is_data(fc)) {
|
|
|
|
tx_cmd->initial_rate_index = 0;
|
|
|
|
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
|
2011-07-14 02:13:46 +08:00
|
|
|
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
2011-07-08 23:46:28 +08:00
|
|
|
if (priv->tm_fixed_rate) {
|
|
|
|
/*
|
|
|
|
* rate overwrite by testmode
|
|
|
|
* we not only send lq command to change rate
|
|
|
|
* we also re-enforce per data pkt base.
|
|
|
|
*/
|
|
|
|
tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
|
|
|
|
memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
|
|
|
|
sizeof(tx_cmd->rate_n_flags));
|
|
|
|
}
|
2011-07-14 02:13:46 +08:00
|
|
|
#endif
|
2010-03-18 04:34:34 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* If the current TX rate stored in mac80211 has the MCS bit set, it's
|
|
|
|
* not really a TX rate. Thus, we use the lowest supported rate for
|
|
|
|
* this band. Also use the lowest supported rate if the stored rate
|
|
|
|
* index is invalid.
|
|
|
|
*/
|
|
|
|
rate_idx = info->control.rates[0].idx;
|
|
|
|
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
|
|
|
|
(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
|
|
|
|
rate_idx = rate_lowest_index(&priv->bands[info->band],
|
|
|
|
info->control.sta);
|
|
|
|
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
|
|
|
|
if (info->band == IEEE80211_BAND_5GHZ)
|
|
|
|
rate_idx += IWL_FIRST_OFDM_RATE;
|
|
|
|
/* Get PLCP rate for tx_cmd->rate_n_flags */
|
|
|
|
rate_plcp = iwl_rates[rate_idx].plcp;
|
|
|
|
/* Zero out flags for this packet */
|
|
|
|
rate_flags = 0;
|
|
|
|
|
|
|
|
/* Set CCK flag as needed */
|
|
|
|
if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
|
|
|
|
rate_flags |= RATE_MCS_CCK_MSK;
|
|
|
|
|
|
|
|
/* Set up antennas */
|
2010-10-06 23:10:00 +08:00
|
|
|
if (priv->cfg->bt_params &&
|
|
|
|
priv->cfg->bt_params->advanced_bt_coexist &&
|
|
|
|
priv->bt_full_concurrent) {
|
2010-08-23 22:57:04 +08:00
|
|
|
/* operated as 1x1 in full concurrency mode */
|
|
|
|
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
|
2011-08-26 14:10:39 +08:00
|
|
|
first_antenna(hw_params(priv).valid_tx_ant));
|
2010-08-23 22:57:04 +08:00
|
|
|
} else
|
|
|
|
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
|
2011-08-26 14:10:39 +08:00
|
|
|
hw_params(priv).valid_tx_ant);
|
2010-03-18 04:34:34 +08:00
|
|
|
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
|
|
|
|
|
|
|
|
/* Set the rate in the TX cmd */
|
|
|
|
tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fill the security fields of a REPLY_TX command for hardware
 * encryption, based on the key mac80211 attached to the frame
 * (info->control.hw_key). CCMP and WEP copy the key material into
 * the command; TKIP derives the per-packet phase-2 key.
 */
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag,
					 int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		/* aggregated CCMP frames need a dedicated flag */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* derive the per-packet phase-2 key for this frame */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		/* WEP key is placed at offset 3 in the key field */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		/* unknown cipher: log and send without sec_ctl changes */
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
|
|
|
|
|
|
|
|
/*
 * start REPLY_TX command process
 *
 * Main TX entry point: builds and submits a REPLY_TX command for an
 * outgoing frame. Chooses the destination station and hardware queue
 * (multicast-after-DTIM, off-channel, AC queue, or per-<sta,tid>
 * aggregation queue), assigns QoS sequence numbers, fills in crypto/
 * basic/rate fields, and hands the frame to the transport layer.
 *
 * Locking: takes priv->shrd->lock (irqsave) for the whole operation
 * and nests priv->shrd->sta_lock inside it; the two error labels
 * unwind exactly the locks held at their point of use.
 *
 * Returns 0 on success, -1 if the frame was dropped (caller owns the
 * skb again in that case).
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_tx_cmd *tx_cmd;
	int txq_id;

	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u16 len;
	u8 sta_id;
	u8 tid = 0;
	unsigned long flags;
	bool is_agg = false;

	/* frames with a vif belong to that vif's RXON context */
	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->shrd->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->shrd->lock */
	spin_lock(&priv->shrd->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = NULL;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
			goto drop_unlock_sta;

		/* stamp the driver-maintained sequence number into the
		 * header, preserving only the fragment bits */
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	/* get a TX command slot from the transport; NULL means the
	 * queue is full */
	tx_cmd = iwl_trans_get_tx_cmd(trans(priv), txq_id);
	if (unlikely(!tx_cmd))
		goto drop_unlock_sta;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	if (iwl_trans_tx(trans(priv), skb, tx_cmd, txq_id, fc, is_agg, ctx))
		goto drop_unlock_sta;

	/* frame accepted: advance the per-TID sequence number, unless
	 * more fragments of the same MSDU are still to come */
	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->shrd->sta_lock);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	return -1;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find first available (lowest unused) Tx Queue, mark it "active".
|
|
|
|
* Called only when finding queue for aggregation.
|
|
|
|
* Should never return anything < 7, because they should already
|
|
|
|
* be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
|
|
|
|
*/
|
|
|
|
static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
|
|
|
|
{
|
|
|
|
int txq_id;
|
|
|
|
|
2011-08-26 14:10:39 +08:00
|
|
|
for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
|
2010-03-18 04:34:34 +08:00
|
|
|
if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
|
|
|
|
return txq_id;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2010-04-29 19:43:07 +08:00
|
|
|
/*
 * Start a TX aggregation (BA) session for <sta, tid>.
 * Validates the request, claims a free aggregation hardware queue,
 * records it in the per-TID data under sta_lock, asks the uCode to
 * enable the TID, and then either switches straight to IWL_AGG_ON
 * (queue empty -> notify mac80211 immediately) or waits in
 * IWL_EMPTYING_HW_QUEUE_ADDBA until in-flight frames drain.
 * Returns 0 on success or a negative error code.
 */
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	/* also rejects unsupported TIDs (8-15) via -EINVAL */
	tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* atomically claim a free hardware queue for this session */
	txq_id = iwlagn_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	tid_data->agg.tx_fifo = tx_fifo;
	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	/* called without sta_lock held -- may sleep / send commands */
	ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
	if (ret)
		return ret;

	/* re-take the lock; tid_data may have changed meanwhile */
	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return ret;
}
|
|
|
|
|
2010-04-29 19:43:07 +08:00
|
|
|
/*
 * Stop the TX aggregation (BA) session for <sta, tid>.
 * If the aggregation queue still has frames in flight, only mark the
 * state IWL_EMPTYING_HW_QUEUE_DELBA and let the reclaim path finish
 * the teardown; otherwise disable the uCode queue now and notify
 * mac80211.
 *
 * Locking note: sta_lock is dropped and shrd->lock taken *without*
 * save/restore in between -- irqs stay disabled from the initial
 * irqsave until the final irqrestore on shrd->lock.
 * Returns 0 on success or a negative error code.
 */
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* also rejects unsupported TIDs (8-15) via -EINVAL */
	tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		/* unexpected state: warn but proceed with teardown */
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->shrd->sta_lock);
	spin_lock(&priv->shrd->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	iwl_trans_txq_agg_disable(trans(priv), txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * iwlagn_txq_check_empty - finish a pending aggregation state transition
 * @priv:   driver private data
 * @sta_id: index of the station in priv->stations[]
 * @tid:    traffic identifier whose aggregation state is being checked
 * @txq_id: HW Tx queue that was just reclaimed from
 *
 * Called after frames have been reclaimed from @txq_id.  If the TID sits in
 * one of the transitional aggregation states, check whether the condition to
 * complete the transition (queue drained) is now met, and if so finish the
 * DELBA / ADDBA flow and notify mac80211.
 *
 * Caller must hold priv->shrd->sta_lock (asserted below).
 * Always returns 0.
 */
int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	/* context (BSS/PAN) this station was added on */
	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->shrd->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			/* queue fully drained: tear down the agg queue in uCode */
			iwl_trans_txq_agg_disable(trans(priv), txq_id,
						  ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			/* let mac80211 complete the stop-BA handshake */
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			/* legacy queue drained: aggregation may now start */
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
|
|
|
|
|
2010-11-17 03:51:38 +08:00
|
|
|
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
|
|
|
|
struct iwl_rxon_context *ctx,
|
|
|
|
const u8 *addr1)
|
2010-03-18 04:34:34 +08:00
|
|
|
{
|
|
|
|
struct ieee80211_sta *sta;
|
|
|
|
struct iwl_station_priv *sta_priv;
|
|
|
|
|
2010-06-08 03:20:38 +08:00
|
|
|
rcu_read_lock();
|
2010-11-17 03:51:38 +08:00
|
|
|
sta = ieee80211_find_sta(ctx->vif, addr1);
|
2010-03-18 04:34:34 +08:00
|
|
|
if (sta) {
|
|
|
|
sta_priv = (void *)sta->drv_priv;
|
|
|
|
/* avoid atomic ops if this isn't a client */
|
|
|
|
if (sta_priv->client &&
|
|
|
|
atomic_dec_return(&sta_priv->pending_frames) == 0)
|
|
|
|
ieee80211_sta_block_awake(priv->hw, sta, false);
|
|
|
|
}
|
2010-06-08 03:20:38 +08:00
|
|
|
rcu_read_unlock();
|
2010-11-17 03:51:38 +08:00
|
|
|
}
|
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
/**
|
|
|
|
* translate ucode response to mac80211 tx status control values
|
|
|
|
*/
|
|
|
|
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
|
|
|
|
struct ieee80211_tx_info *info)
|
2010-11-17 03:51:38 +08:00
|
|
|
{
|
2011-08-26 14:11:00 +08:00
|
|
|
struct ieee80211_tx_rate *r = &info->control.rates[0];
|
2010-03-18 04:34:34 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
info->antenna_sel_tx =
|
|
|
|
((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
|
|
|
|
if (rate_n_flags & RATE_MCS_HT_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_MCS;
|
|
|
|
if (rate_n_flags & RATE_MCS_GF_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
|
|
|
|
if (rate_n_flags & RATE_MCS_HT40_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
|
|
|
|
if (rate_n_flags & RATE_MCS_DUP_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_DUP_DATA;
|
|
|
|
if (rate_n_flags & RATE_MCS_SGI_MSK)
|
|
|
|
r->flags |= IEEE80211_TX_RC_SHORT_GI;
|
|
|
|
r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
|
2010-03-18 04:34:34 +08:00
|
|
|
}
|
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
#ifdef CONFIG_IWLWIFI_DEBUG
|
|
|
|
const char *iwl_get_tx_fail_reason(u32 status)
|
2010-03-18 04:34:34 +08:00
|
|
|
{
|
2011-08-26 14:11:00 +08:00
|
|
|
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
|
|
|
|
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
|
2010-03-18 04:34:34 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
switch (status & TX_STATUS_MSK) {
|
|
|
|
case TX_STATUS_SUCCESS:
|
|
|
|
return "SUCCESS";
|
|
|
|
TX_STATUS_POSTPONE(DELAY);
|
|
|
|
TX_STATUS_POSTPONE(FEW_BYTES);
|
|
|
|
TX_STATUS_POSTPONE(BT_PRIO);
|
|
|
|
TX_STATUS_POSTPONE(QUIET_PERIOD);
|
|
|
|
TX_STATUS_POSTPONE(CALC_TTAK);
|
|
|
|
TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
|
|
|
|
TX_STATUS_FAIL(SHORT_LIMIT);
|
|
|
|
TX_STATUS_FAIL(LONG_LIMIT);
|
|
|
|
TX_STATUS_FAIL(FIFO_UNDERRUN);
|
|
|
|
TX_STATUS_FAIL(DRAIN_FLOW);
|
|
|
|
TX_STATUS_FAIL(RFKILL_FLUSH);
|
|
|
|
TX_STATUS_FAIL(LIFE_EXPIRE);
|
|
|
|
TX_STATUS_FAIL(DEST_PS);
|
|
|
|
TX_STATUS_FAIL(HOST_ABORTED);
|
|
|
|
TX_STATUS_FAIL(BT_RETRY);
|
|
|
|
TX_STATUS_FAIL(STA_INVALID);
|
|
|
|
TX_STATUS_FAIL(FRAG_DROPPED);
|
|
|
|
TX_STATUS_FAIL(TID_DISABLE);
|
|
|
|
TX_STATUS_FAIL(FIFO_FLUSHED);
|
|
|
|
TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
|
|
|
|
TX_STATUS_FAIL(PASSIVE_NO_RX);
|
|
|
|
TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
|
2010-03-18 04:34:34 +08:00
|
|
|
}
|
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
return "UNKNOWN";
|
2010-03-18 04:34:34 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
#undef TX_STATUS_FAIL
|
|
|
|
#undef TX_STATUS_POSTPONE
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_IWLWIFI_DEBUG */
|
2011-04-20 21:57:14 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
/*
 * iwl_rx_reply_tx_agg - process a Tx response for an aggregation queue
 *
 * Records the rate used and whether a block-ack is still expected for this
 * station/TID, logs BT-coex kills, and walks the per-frame status array at
 * the end of the response for error accounting / debug logging.
 *
 * The station/TID are decoded from tx_resp->ra_tid; no locking is taken
 * here — the caller serializes access to priv->stations[].
 */
static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	/* frame_status[] holds one agg_tx_status entry per frame */
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->stations[sta_id].tid[tid].agg;
	/* status of the first frame in the response */
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
			"got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	/* a multi-frame response will be followed by a compressed BA */
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		/*
		 * NOTE(review): both checks below test the loop-invariant
		 * first-frame 'status' rather than the per-frame 'fstatus'
		 * that is counted/printed — looks like it may have been
		 * intended to be 'fstatus'; confirm against uCode API
		 * before changing.
		 */
		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
				   "try-count (0x%08x)\n",
				   iwl_get_agg_tx_fail_reason(fstatus),
				   fstatus & AGG_TX_STATUS_MSK,
				   fstatus & AGG_TX_TRY_MSK);
	}
}
|
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
|
|
|
|
{
|
|
|
|
return le32_to_cpup((__le32 *)&tx_resp->status +
|
|
|
|
tx_resp->frame_count) & MAX_SN;
|
|
|
|
}
|
2010-03-18 04:34:34 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
/*
 * iwlagn_rx_reply_tx - Handler for REPLY_TX (Tx response) notifications
 *
 * Validates the queue index encoded in the packet sequence, updates the
 * queue timestamp, runs aggregation bookkeeping when the queue is in
 * scheduled-retry mode, and — for single-frame responses — reclaims the
 * transmitted skbs from the transport, fills in their mac80211 tx status
 * and hands them back via ieee80211_tx_status_irqsafe().
 *
 * Runs in interrupt/tasklet context; takes priv->shrd->sta_lock with IRQs
 * saved for the duration of the status processing.
 */
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	/* queue and index the response refers to, packed in the sequence */
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	/* scheduler SSN appended after the per-frame status array */
	u32 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;

	/* reject responses pointing outside the queue's live window */
	if ((cmd_index >= txq->q.n_bd) ||
	    (iwl_queue_used(&txq->q, cmd_index) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
			  "cmd_index %d is out of range [0-%d] %d %d\n",
			  __func__, txq_id, cmd_index, txq->q.n_bd,
			  txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	/* feed the stuck-queue watchdog */
	txq->time_stamp = jiffies;

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (txq->sched_retry)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	/* multi-frame responses are reclaimed later, on compressed BA */
	if (tx_resp->frame_count == 1) {
		bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

		__skb_queue_head_init(&skbs);
		/*we can free until ssn % q.n_bd not inclusive */
		iwl_trans_reclaim(trans(priv), txq_id, ssn, status, &skbs);
		freed = 0;
		while (!skb_queue_empty(&skbs)) {
			skb = __skb_dequeue(&skbs);
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			/* context was stashed in the cb on the Tx path */
			ctx = info->driver_data[0];

			memset(&info->status, 0, sizeof(info->status));

			/*
			 * Frame rejected on a passive channel while
			 * associated as a station: pause the queue until
			 * Rx proves the channel may be used.
			 */
			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				ctx->last_tx_rejected = true;
				iwl_stop_queue(priv, &priv->txq[txq_id]);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp, is_agg);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			ieee80211_tx_status_irqsafe(priv->hw, skb);

			freed++;
		}

		/* a 1-frame response on a non-agg queue frees exactly 1 */
		WARN_ON(!is_agg && freed != 1);

		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
		iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
|
|
|
|
|
2010-03-18 04:34:34 +08:00
|
|
|
/**
|
|
|
|
* iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
|
|
|
|
*
|
|
|
|
* Handles block-acknowledge notification from device, which reports success
|
|
|
|
* of frames sent via aggregation.
|
|
|
|
*/
|
|
|
|
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
|
|
|
|
struct iwl_rx_mem_buffer *rxb)
|
|
|
|
{
|
|
|
|
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
|
|
|
struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
|
|
|
|
struct iwl_tx_queue *txq = NULL;
|
|
|
|
struct iwl_ht_agg *agg;
|
2011-08-26 14:11:00 +08:00
|
|
|
struct sk_buff_head reclaimed_skbs;
|
|
|
|
struct ieee80211_tx_info *info;
|
|
|
|
struct ieee80211_hdr *hdr;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
unsigned long flags;
|
2010-03-18 04:34:34 +08:00
|
|
|
int index;
|
|
|
|
int sta_id;
|
|
|
|
int tid;
|
2011-08-26 14:11:00 +08:00
|
|
|
int freed;
|
2010-03-18 04:34:34 +08:00
|
|
|
|
|
|
|
/* "flow" corresponds to Tx queue */
|
|
|
|
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
|
|
|
|
|
|
|
|
/* "ssn" is start of block-ack Tx window, corresponds to index
|
|
|
|
* (in Tx queue's circular buffer) of first TFD/frame in window */
|
|
|
|
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
|
|
|
|
|
2011-08-26 14:10:39 +08:00
|
|
|
if (scd_flow >= hw_params(priv).max_txq_num) {
|
2010-03-18 04:34:34 +08:00
|
|
|
IWL_ERR(priv,
|
|
|
|
"BUG_ON scd_flow is bigger than number of queues\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
txq = &priv->txq[scd_flow];
|
|
|
|
sta_id = ba_resp->sta_id;
|
|
|
|
tid = ba_resp->tid;
|
|
|
|
agg = &priv->stations[sta_id].tid[tid].agg;
|
2011-08-26 14:11:00 +08:00
|
|
|
|
|
|
|
/* Find index of block-ack window */
|
|
|
|
index = ba_resp_scd_ssn & (txq->q.n_bd - 1);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
|
|
|
|
|
2010-06-02 08:13:58 +08:00
|
|
|
if (unlikely(agg->txq_id != scd_flow)) {
|
2010-07-31 23:34:11 +08:00
|
|
|
/*
|
|
|
|
* FIXME: this is a uCode bug which need to be addressed,
|
|
|
|
* log the information and return for now!
|
|
|
|
* since it is possible happen very often and in order
|
|
|
|
* not to fill the syslog, don't enable the logging by default
|
|
|
|
*/
|
|
|
|
IWL_DEBUG_TX_REPLY(priv,
|
|
|
|
"BA scd_flow %d does not match txq_id %d\n",
|
2010-06-02 08:13:58 +08:00
|
|
|
scd_flow, agg->txq_id);
|
2011-08-26 14:11:00 +08:00
|
|
|
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
2010-06-02 08:13:58 +08:00
|
|
|
return;
|
|
|
|
}
|
2010-03-18 04:34:34 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
if (unlikely(!agg->wait_for_ba)) {
|
|
|
|
if (unlikely(ba_resp->bitmap))
|
|
|
|
IWL_ERR(priv, "Received BA when not expected\n");
|
|
|
|
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
|
|
|
return;
|
|
|
|
}
|
2010-03-18 04:34:34 +08:00
|
|
|
|
|
|
|
IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
|
|
|
|
"sta_id = %d\n",
|
|
|
|
agg->wait_for_ba,
|
|
|
|
(u8 *) &ba_resp->sta_addr_lo32,
|
|
|
|
ba_resp->sta_id);
|
2011-08-26 14:11:00 +08:00
|
|
|
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
|
|
|
|
"scd_flow = %d, scd_ssn = %d\n",
|
2010-03-18 04:34:34 +08:00
|
|
|
ba_resp->tid,
|
|
|
|
ba_resp->seq_ctl,
|
|
|
|
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
|
|
|
|
ba_resp->scd_flow,
|
|
|
|
ba_resp->scd_ssn);
|
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
/* Mark that the expected block-ack response arrived */
|
|
|
|
agg->wait_for_ba = 0;
|
|
|
|
|
|
|
|
/* Sanity check values reported by uCode */
|
|
|
|
if (ba_resp->txed_2_done > ba_resp->txed) {
|
|
|
|
IWL_DEBUG_TX_REPLY(priv,
|
|
|
|
"bogus sent(%d) and ack(%d) count\n",
|
|
|
|
ba_resp->txed, ba_resp->txed_2_done);
|
|
|
|
/*
|
|
|
|
* set txed_2_done = txed,
|
|
|
|
* so it won't impact rate scale
|
|
|
|
*/
|
|
|
|
ba_resp->txed = ba_resp->txed_2_done;
|
|
|
|
}
|
|
|
|
IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
|
|
|
|
ba_resp->txed, ba_resp->txed_2_done);
|
|
|
|
|
|
|
|
__skb_queue_head_init(&reclaimed_skbs);
|
2010-03-18 04:34:34 +08:00
|
|
|
|
|
|
|
/* Release all TFDs before the SSN, i.e. all TFDs in front of
|
|
|
|
* block-ack window (we assume that they've been successfully
|
|
|
|
* transmitted ... if not, it's too late anyway). */
|
2011-08-26 14:11:00 +08:00
|
|
|
iwl_trans_reclaim(trans(priv), scd_flow, ba_resp_scd_ssn, 0,
|
|
|
|
&reclaimed_skbs);
|
|
|
|
freed = 0;
|
|
|
|
while (!skb_queue_empty(&reclaimed_skbs)) {
|
2010-03-18 04:34:34 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
skb = __skb_dequeue(&reclaimed_skbs);
|
|
|
|
hdr = (struct ieee80211_hdr *)skb->data;
|
2010-09-23 00:02:02 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
if (ieee80211_is_data_qos(hdr->frame_control))
|
|
|
|
freed++;
|
|
|
|
else
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
|
|
|
|
if (freed == 0) {
|
|
|
|
/* this is the first skb we deliver in this batch */
|
|
|
|
/* put the rate scaling data there */
|
|
|
|
info = IEEE80211_SKB_CB(skb);
|
|
|
|
memset(&info->status, 0, sizeof(info->status));
|
|
|
|
info->flags |= IEEE80211_TX_STAT_ACK;
|
|
|
|
info->flags |= IEEE80211_TX_STAT_AMPDU;
|
|
|
|
info->status.ampdu_ack_len = ba_resp->txed_2_done;
|
|
|
|
info->status.ampdu_len = ba_resp->txed;
|
|
|
|
iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
|
|
|
|
info);
|
|
|
|
}
|
2010-09-23 00:02:02 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
ieee80211_tx_status_irqsafe(priv->hw, skb);
|
2010-09-23 00:02:02 +08:00
|
|
|
}
|
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
|
|
|
|
iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
|
2010-09-23 00:02:02 +08:00
|
|
|
|
2011-08-26 14:11:00 +08:00
|
|
|
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
|
2010-09-23 00:02:02 +08:00
|
|
|
}
|