Merge tag 'for-linville-20140725' of git://github.com/kvalo/ath
commit 0da4cc6e62
@@ -546,7 +546,7 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
 int ath10k_htc_wait_target(struct ath10k_htc *htc)
 {
-        int status = 0;
+        int i, status = 0;
         struct ath10k_htc_svc_conn_req conn_req;
         struct ath10k_htc_svc_conn_resp conn_resp;
         struct ath10k_htc_msg *msg;
@@ -556,10 +556,26 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
         status = wait_for_completion_timeout(&htc->ctl_resp,
                                              ATH10K_HTC_WAIT_TIMEOUT_HZ);
-        if (status <= 0) {
+        if (status == 0) {
+                /* Workaround: In some cases the PCI HIF doesn't
+                 * receive interrupt for the control response message
+                 * even if the buffer was completed. It is suspected
+                 * iomap writes unmasking PCI CE irqs aren't propagated
+                 * properly in KVM PCI-passthrough sometimes.
+                 */
+                ath10k_warn("failed to receive control response completion, polling..\n");
+
+                for (i = 0; i < CE_COUNT; i++)
+                        ath10k_hif_send_complete_check(htc->ar, i, 1);
+
+                status = wait_for_completion_timeout(&htc->ctl_resp,
                                                      ATH10K_HTC_WAIT_TIMEOUT_HZ);
+
+                if (status == 0)
+                        status = -ETIMEDOUT;
+        }
+
+        if (status < 0) {
                 ath10k_err("ctl_resp never came in (%d)\n", status);
                 return status;
         }
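The workaround above copes with a lost control-response interrupt by polling every copy-engine pipe and then waiting a second time before giving up. Below is a minimal, generic sketch of that wait/poll/re-wait pattern; the helper name and the poll callback are illustrative assumptions, only the completion API is the real kernel interface.

#include <linux/completion.h>
#include <linux/errno.h>

/* Sketch only: wait for a completion; on timeout, poll once in case the
 * interrupt was lost, then wait again before reporting -ETIMEDOUT. */
static int wait_with_poll_fallback(struct completion *done,
                                   unsigned long timeout,
                                   void (*poll_all)(void *ctx), void *ctx)
{
        unsigned long left;

        left = wait_for_completion_timeout(done, timeout);
        if (left)
                return 0;

        /* Timed out: force completion processing, then retry the wait. */
        poll_all(ctx);

        left = wait_for_completion_timeout(done, timeout);
        return left ? 0 : -ETIMEDOUT;
}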
@@ -21,6 +21,7 @@
 #include "txrx.h"
 #include "debug.h"
 #include "trace.h"
+#include "mac.h"

 #include <linux/log2.h>
@@ -307,7 +308,8 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                    u8 **fw_desc, int *fw_desc_len,
                                    struct sk_buff **head_msdu,
-                                   struct sk_buff **tail_msdu)
+                                   struct sk_buff **tail_msdu,
+                                   u32 *attention)
 {
         int msdu_len, msdu_chaining = 0;
         struct sk_buff *msdu;
@@ -357,6 +359,11 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                         break;
                 }

+                *attention |= __le32_to_cpu(rx_desc->attention.flags) &
+                              (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
+                               RX_ATTENTION_FLAGS_DECRYPT_ERR |
+                               RX_ATTENTION_FLAGS_FCS_ERR |
+                               RX_ATTENTION_FLAGS_MGMT_TYPE);
                 /*
                  * Copy the FW rx descriptor for this MSDU from the rx
                  * indication message into the MSDU's netbuf. HL uses the
@@ -1215,13 +1222,15 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                 for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
                         struct sk_buff *msdu_head, *msdu_tail;

+                        attention = 0;
                         msdu_head = NULL;
                         msdu_tail = NULL;
                         ret = ath10k_htt_rx_amsdu_pop(htt,
                                                       &fw_desc,
                                                       &fw_desc_len,
                                                       &msdu_head,
-                                                      &msdu_tail);
+                                                      &msdu_tail,
+                                                      &attention);

                         if (ret < 0) {
                                 ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
@@ -1233,7 +1242,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                         rxd = container_of((void *)msdu_head->data,
                                            struct htt_rx_desc,
                                            msdu_payload);
-                        attention = __le32_to_cpu(rxd->attention.flags);

                         if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
                                                          status,
@@ -1286,6 +1294,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
         u8 *fw_desc;
         int fw_desc_len, hdrlen, paramlen;
         int trim;
+        u32 attention = 0;

         fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
         fw_desc = (u8 *)frag->fw_msdu_rx_desc;
@@ -1295,7 +1304,8 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,

         spin_lock_bh(&htt->rx_ring.lock);
         ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
-                                      &msdu_head, &msdu_tail);
+                                      &msdu_head, &msdu_tail,
+                                      &attention);
         spin_unlock_bh(&htt->rx_ring.lock);

         ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
@@ -1312,10 +1322,8 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,

         hdr = (struct ieee80211_hdr *)msdu_head->data;
         rxd = (void *)msdu_head->data - sizeof(*rxd);
-        tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
-                          RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
-        decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
-                          RX_ATTENTION_FLAGS_DECRYPT_ERR);
+        tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+        decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
         fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                  RX_MSDU_START_INFO1_DECAP_FORMAT);
@@ -1422,6 +1430,86 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
         }
 }

+static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
+{
+        struct htt_rx_addba *ev = &resp->rx_addba;
+        struct ath10k_peer *peer;
+        struct ath10k_vif *arvif;
+        u16 info0, tid, peer_id;
+
+        info0 = __le16_to_cpu(ev->info0);
+        tid = MS(info0, HTT_RX_BA_INFO0_TID);
+        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+        ath10k_dbg(ATH10K_DBG_HTT,
+                   "htt rx addba tid %hu peer_id %hu size %hhu\n",
+                   tid, peer_id, ev->window_size);
+
+        spin_lock_bh(&ar->data_lock);
+        peer = ath10k_peer_find_by_id(ar, peer_id);
+        if (!peer) {
+                ath10k_warn("received addba event for invalid peer_id: %hu\n",
+                            peer_id);
+                spin_unlock_bh(&ar->data_lock);
+                return;
+        }
+
+        arvif = ath10k_get_arvif(ar, peer->vdev_id);
+        if (!arvif) {
+                ath10k_warn("received addba event for invalid vdev_id: %u\n",
+                            peer->vdev_id);
+                spin_unlock_bh(&ar->data_lock);
+                return;
+        }
+
+        ath10k_dbg(ATH10K_DBG_HTT,
+                   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
+                   peer->addr, tid, ev->window_size);
+
+        ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+        spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
+{
+        struct htt_rx_delba *ev = &resp->rx_delba;
+        struct ath10k_peer *peer;
+        struct ath10k_vif *arvif;
+        u16 info0, tid, peer_id;
+
+        info0 = __le16_to_cpu(ev->info0);
+        tid = MS(info0, HTT_RX_BA_INFO0_TID);
+        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+        ath10k_dbg(ATH10K_DBG_HTT,
+                   "htt rx delba tid %hu peer_id %hu\n",
+                   tid, peer_id);
+
+        spin_lock_bh(&ar->data_lock);
+        peer = ath10k_peer_find_by_id(ar, peer_id);
+        if (!peer) {
+                ath10k_warn("received addba event for invalid peer_id: %hu\n",
+                            peer_id);
+                spin_unlock_bh(&ar->data_lock);
+                return;
+        }
+
+        arvif = ath10k_get_arvif(ar, peer->vdev_id);
+        if (!arvif) {
+                ath10k_warn("received addba event for invalid vdev_id: %u\n",
+                            peer->vdev_id);
+                spin_unlock_bh(&ar->data_lock);
+                return;
+        }
+
+        ath10k_dbg(ATH10K_DBG_HTT,
+                   "htt rx stop rx ba session sta %pM tid %hu\n",
+                   peer->addr, tid);
+
+        ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+        spin_unlock_bh(&ar->data_lock);
+}
+
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
         struct ath10k_htt *htt = &ar->htt;
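Both handlers above unpack tid and peer_id from the little-endian info0 word using the driver's MS() mask/shift helper. The self-contained sketch below shows the same extraction pattern; the field layout values are made up for illustration and are not the real HTT_RX_BA_INFO0_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed demo layout: low 4 bits carry the TID, upper 12 bits the peer id. */
#define DEMO_TID_MASK     0x000f
#define DEMO_TID_LSB      0
#define DEMO_PEER_ID_MASK 0xfff0
#define DEMO_PEER_ID_LSB  4

/* Same shape as an MS()-style helper: mask out the field, shift it down. */
#define MS(val, field) (((val) & field##_MASK) >> field##_LSB)

int main(void)
{
        uint16_t info0 = 0x0235; /* example packed word */

        printf("tid=%u peer_id=%u\n",
               (unsigned int)MS(info0, DEMO_TID),
               (unsigned int)MS(info0, DEMO_PEER_ID));
        return 0;
}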
@@ -1516,9 +1604,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                 trace_ath10k_htt_stats(skb->data, skb->len);
                 break;
         case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+                /* Firmware can return tx frames if it's unable to fully
+                 * process them and suspects host may be able to fix it. ath10k
+                 * sends all tx frames as already inspected so this shouldn't
+                 * happen unless fw has a bug.
+                 */
+                ath10k_warn("received an unexpected htt tx inspect event\n");
+                break;
+        case HTT_T2H_MSG_TYPE_RX_ADDBA:
+                ath10k_htt_rx_addba(ar, resp);
+                break;
         case HTT_T2H_MSG_TYPE_RX_DELBA:
-        case HTT_T2H_MSG_TYPE_RX_FLUSH:
+                ath10k_htt_rx_delba(ar, resp);
+                break;
+        case HTT_T2H_MSG_TYPE_RX_FLUSH: {
+                /* Ignore this event because mac80211 takes care of Rx
+                 * aggregation reordering.
+                 */
+                break;
+        }
         default:
                 ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
                            resp->hdr.msg_type);
@@ -531,6 +531,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
         flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
         flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

+        /* Prevent firmware from sending up tx inspection requests. There's
+         * nothing ath10k can do with frames requested for inspection so force
+         * it to simply rely a regular tx completion with discard status.
+         */
+        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
         skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
         skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
         skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
@@ -1865,15 +1865,13 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
         return 0;
 }

-/*
- * Frames sent to the FW have to be in "Native Wifi" format.
- * Strip the QoS field from the 802.11 header.
+/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
+ * Control in the header.
  */
-static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
-                                       struct ieee80211_tx_control *control,
-                                       struct sk_buff *skb)
+static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
         struct ieee80211_hdr *hdr = (void *)skb->data;
+        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
         u8 *qos_ctl;

         if (!ieee80211_is_data_qos(hdr->frame_control))
@@ -1883,6 +1881,16 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
         memmove(skb->data + IEEE80211_QOS_CTL_LEN,
                 skb->data, (void *)qos_ctl - (void *)skb->data);
         skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+        /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
+         * frames. Powersave is handled by the fw/hw so QoS NyllFunc frames are
+         * used only for CQM purposes (e.g. hostapd station keepalive ping) so
+         * it is safe to downgrade to NullFunc.
+         */
+        if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+                cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+        }
 }

 static void ath10k_tx_wep_key_work(struct work_struct *work)
@@ -1919,14 +1927,13 @@ unlock:
         mutex_unlock(&arvif->ar->conf_mutex);
 }

-static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
+static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif,
+                                       struct ieee80211_key_conf *key,
+                                       struct sk_buff *skb)
 {
-        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-        struct ieee80211_vif *vif = info->control.vif;
         struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
         struct ath10k *ar = arvif->ar;
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-        struct ieee80211_key_conf *key = info->control.hw_key;

         if (!ieee80211_has_protected(hdr->frame_control))
                 return;
@@ -1948,11 +1955,11 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
         ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
 }

-static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
+                                       struct ieee80211_vif *vif,
+                                       struct sk_buff *skb)
 {
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-        struct ieee80211_vif *vif = info->control.vif;
         struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

         /* This is case only for P2P_GO */
@@ -2254,33 +2261,28 @@ static void ath10k_tx(struct ieee80211_hw *hw,
                       struct ieee80211_tx_control *control,
                       struct sk_buff *skb)
 {
-        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
         struct ath10k *ar = hw->priv;
-        u8 tid, vdev_id;
+        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+        struct ieee80211_vif *vif = info->control.vif;
+        struct ieee80211_key_conf *key = info->control.hw_key;
+        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

         /* We should disable CCK RATE due to P2P */
         if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
                 ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");

-        /* we must calculate tid before we apply qos workaround
-         * as we'd lose the qos control field */
-        tid = ath10k_tx_h_get_tid(hdr);
-        vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
+        ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+        ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+        ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, info);

         /* it makes no sense to process injected frames like that */
-        if (info->control.vif &&
-            info->control.vif->type != NL80211_IFTYPE_MONITOR) {
-                ath10k_tx_h_qos_workaround(hw, control, skb);
-                ath10k_tx_h_update_wep_key(skb);
-                ath10k_tx_h_add_p2p_noa_ie(ar, skb);
-                ath10k_tx_h_seq_no(skb);
+        if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+                ath10k_tx_h_nwifi(hw, skb);
+                ath10k_tx_h_update_wep_key(vif, key, skb);
+                ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+                ath10k_tx_h_seq_no(vif, skb);
         }

-        ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
-        ATH10K_SKB_CB(skb)->htt.is_offchan = false;
-        ATH10K_SKB_CB(skb)->htt.tid = tid;
-
         if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
                 spin_lock_bh(&ar->data_lock);
                 ATH10K_SKB_CB(skb)->htt.is_offchan = true;
@@ -4331,6 +4333,38 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
         return 0;
 }

+static int ath10k_ampdu_action(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               enum ieee80211_ampdu_mlme_action action,
+                               struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                               u8 buf_size)
+{
+        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+        ath10k_dbg(ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
+                   arvif->vdev_id, sta->addr, tid, action);
+
+        switch (action) {
+        case IEEE80211_AMPDU_RX_START:
+        case IEEE80211_AMPDU_RX_STOP:
+                /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
+                 * creation/removal. Do we need to verify this?
+                 */
+                return 0;
+        case IEEE80211_AMPDU_TX_START:
+        case IEEE80211_AMPDU_TX_STOP_CONT:
+        case IEEE80211_AMPDU_TX_STOP_FLUSH:
+        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+        case IEEE80211_AMPDU_TX_OPERATIONAL:
+                /* Firmware offloads Tx aggregation entirely so deny mac80211
+                 * Tx aggregation requests.
+                 */
+                return -EOPNOTSUPP;
+        }
+
+        return -EINVAL;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
         .tx                             = ath10k_tx,
         .start                          = ath10k_start,
@@ -4358,6 +4392,7 @@ static const struct ieee80211_ops ath10k_ops = {
         .set_bitrate_mask               = ath10k_set_bitrate_mask,
         .sta_rc_update                  = ath10k_sta_rc_update,
         .get_tsf                        = ath10k_get_tsf,
+        .ampdu_action                   = ath10k_ampdu_action,
 #ifdef CONFIG_PM
         .suspend                        = ath10k_suspend,
         .resume                         = ath10k_resume,
@@ -4698,7 +4733,6 @@ int ath10k_mac_register(struct ath10k *ar)

         ar->hw->wiphy->interface_modes =
                 BIT(NL80211_IFTYPE_STATION) |
-                BIT(NL80211_IFTYPE_ADHOC) |
                 BIT(NL80211_IFTYPE_AP);

         if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
@@ -4768,6 +4802,8 @@ int ath10k_mac_register(struct ath10k *ar)
                 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
                 ar->hw->wiphy->n_iface_combinations =
                         ARRAY_SIZE(ath10k_if_comb);
+
+                ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
         }

         ar->hw->netdev_features = NETIF_F_HW_CSUM;
@@ -43,11 +43,11 @@ static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
         return (struct ath10k_vif *)vif->drv_priv;
 }

-static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
+static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
+                                      struct sk_buff *skb)
 {
         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-        struct ieee80211_vif *vif = info->control.vif;
         struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

         if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -726,18 +726,12 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
         unsigned int nbytes, max_nbytes;
         unsigned int transfer_id;
         unsigned int flags;
-        int err;
+        int err, num_replenish = 0;

         while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                              &ce_data, &nbytes, &transfer_id,
                                              &flags) == 0) {
-                err = ath10k_pci_post_rx_pipe(pipe_info, 1);
-                if (unlikely(err)) {
-                        /* FIXME: retry */
-                        ath10k_warn("failed to replenish CE rx ring %d: %d\n",
-                                    pipe_info->pipe_num, err);
-                }
-
+                num_replenish++;
                 skb = transfer_context;
                 max_nbytes = skb->len + skb_tailroom(skb);
                 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
@@ -753,6 +747,13 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
                 skb_put(skb, nbytes);
                 cb->rx_completion(ar, skb, pipe_info->pipe_num);
         }
+
+        err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish);
+        if (unlikely(err)) {
+                /* FIXME: retry */
+                ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
+                            pipe_info->pipe_num, num_replenish, err);
+        }
 }

 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
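The two pci.c hunks above move the rx-buffer replenish out of the completion loop: the loop now only counts consumed buffers and a single batched post happens after the ring is drained. Below is a minimal, self-contained sketch of that batched-replenish pattern; every name in it is an illustrative assumption, not the ath10k copy-engine API.

/* Sketch only: drain completed buffers, remember how many were consumed,
 * then refill the ring with one bulk post instead of one post per buffer. */
struct demo_ring_ops {
        void *(*next_completed)(void *ring);    /* NULL when drained */
        void (*deliver)(void *buf);             /* hand buffer to consumer */
        int (*post_buffers)(void *ring, int n); /* 0 on success */
};

static int demo_drain_and_replenish(void *ring, const struct demo_ring_ops *ops)
{
        void *buf;
        int num_replenish = 0;

        while ((buf = ops->next_completed(ring)) != NULL) {
                num_replenish++;
                ops->deliver(buf);
        }

        return ops->post_buffers(ring, num_replenish);
}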
@@ -119,8 +119,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
         return NULL;
 }

-static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
-                                                  int peer_id)
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
 {
         struct ath10k_peer *peer;
@@ -24,6 +24,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,

 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
                                      const u8 *addr);
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
 int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
                                  const u8 *addr);
 int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
@@ -1432,7 +1432,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                         continue;
                 }

-                ath10k_tx_h_seq_no(bcn);
+                ath10k_tx_h_seq_no(arvif->vif, bcn);
                 ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
                 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);