Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for v5.6. Major changes:

ath11k

* a new driver for Qualcomm Wi-Fi 6 (IEEE 802.11ax) devices

ath10k

* significant improvements in receive throughput and firmware download
  over the SDIO bus

* report signal strength for each chain on SDIO as well

* set the maximum MTU to 1500 on SDIO devices
commit 57725b5bc5
Kalle Valo <kvalo@codeaurora.org>, 2019-12-10 11:34:30 +02:00
69 changed files with 49530 additions and 111 deletions

@@ -0,0 +1,273 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/wireless/qcom,ath11k.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies ath11k wireless devices Generic Binding
maintainers:
- Kalle Valo <kvalo@codeaurora.org>
description: |
These are the device tree entries for Qualcomm Technologies, Inc. IEEE 802.11ax
devices, for example the AHB-based IPQ8074.
properties:
compatible:
const: qcom,ipq8074-wifi
reg:
maxItems: 1
interrupts:
items:
- description: misc-pulse1 interrupt events
- description: misc-latch interrupt events
- description: sw exception interrupt events
- description: watchdog interrupt events
- description: interrupt event for ring CE0
- description: interrupt event for ring CE1
- description: interrupt event for ring CE2
- description: interrupt event for ring CE3
- description: interrupt event for ring CE4
- description: interrupt event for ring CE5
- description: interrupt event for ring CE6
- description: interrupt event for ring CE7
- description: interrupt event for ring CE8
- description: interrupt event for ring CE9
- description: interrupt event for ring CE10
- description: interrupt event for ring CE11
- description: interrupt event for ring host2wbm-desc-feed
- description: interrupt event for ring host2reo-re-injection
- description: interrupt event for ring host2reo-command
- description: interrupt event for ring host2rxdma-monitor-ring3
- description: interrupt event for ring host2rxdma-monitor-ring2
- description: interrupt event for ring host2rxdma-monitor-ring1
- description: interrupt event for ring reo2ost-exception
- description: interrupt event for ring wbm2host-rx-release
- description: interrupt event for ring reo2host-status
- description: interrupt event for ring reo2host-destination-ring4
- description: interrupt event for ring reo2host-destination-ring3
- description: interrupt event for ring reo2host-destination-ring2
- description: interrupt event for ring reo2host-destination-ring1
- description: interrupt event for ring rxdma2host-monitor-destination-mac3
- description: interrupt event for ring rxdma2host-monitor-destination-mac2
- description: interrupt event for ring rxdma2host-monitor-destination-mac1
- description: interrupt event for ring ppdu-end-interrupts-mac3
- description: interrupt event for ring ppdu-end-interrupts-mac2
- description: interrupt event for ring ppdu-end-interrupts-mac1
- description: interrupt event for ring rxdma2host-monitor-status-ring-mac3
- description: interrupt event for ring rxdma2host-monitor-status-ring-mac2
- description: interrupt event for ring rxdma2host-monitor-status-ring-mac1
- description: interrupt event for ring host2rxdma-host-buf-ring-mac3
- description: interrupt event for ring host2rxdma-host-buf-ring-mac2
- description: interrupt event for ring host2rxdma-host-buf-ring-mac1
- description: interrupt event for ring rxdma2host-destination-ring-mac3
- description: interrupt event for ring rxdma2host-destination-ring-mac2
- description: interrupt event for ring rxdma2host-destination-ring-mac1
- description: interrupt event for ring host2tcl-input-ring4
- description: interrupt event for ring host2tcl-input-ring3
- description: interrupt event for ring host2tcl-input-ring2
- description: interrupt event for ring host2tcl-input-ring1
- description: interrupt event for ring wbm2host-tx-completions-ring3
- description: interrupt event for ring wbm2host-tx-completions-ring2
- description: interrupt event for ring wbm2host-tx-completions-ring1
- description: interrupt event for ring tcl2host-status-ring
interrupt-names:
items:
- const: misc-pulse1
- const: misc-latch
- const: sw-exception
- const: watchdog
- const: ce0
- const: ce1
- const: ce2
- const: ce3
- const: ce4
- const: ce5
- const: ce6
- const: ce7
- const: ce8
- const: ce9
- const: ce10
- const: ce11
- const: host2wbm-desc-feed
- const: host2reo-re-injection
- const: host2reo-command
- const: host2rxdma-monitor-ring3
- const: host2rxdma-monitor-ring2
- const: host2rxdma-monitor-ring1
- const: reo2ost-exception
- const: wbm2host-rx-release
- const: reo2host-status
- const: reo2host-destination-ring4
- const: reo2host-destination-ring3
- const: reo2host-destination-ring2
- const: reo2host-destination-ring1
- const: rxdma2host-monitor-destination-mac3
- const: rxdma2host-monitor-destination-mac2
- const: rxdma2host-monitor-destination-mac1
- const: ppdu-end-interrupts-mac3
- const: ppdu-end-interrupts-mac2
- const: ppdu-end-interrupts-mac1
- const: rxdma2host-monitor-status-ring-mac3
- const: rxdma2host-monitor-status-ring-mac2
- const: rxdma2host-monitor-status-ring-mac1
- const: host2rxdma-host-buf-ring-mac3
- const: host2rxdma-host-buf-ring-mac2
- const: host2rxdma-host-buf-ring-mac1
- const: rxdma2host-destination-ring-mac3
- const: rxdma2host-destination-ring-mac2
- const: rxdma2host-destination-ring-mac1
- const: host2tcl-input-ring4
- const: host2tcl-input-ring3
- const: host2tcl-input-ring2
- const: host2tcl-input-ring1
- const: wbm2host-tx-completions-ring3
- const: wbm2host-tx-completions-ring2
- const: wbm2host-tx-completions-ring1
- const: tcl2host-status-ring
qcom,rproc:
$ref: /schemas/types.yaml#/definitions/phandle
description:
DT entry of the q6v5-wcss remoteproc driver.
Phandle to a node that can contain the following properties:
* compatible
* reg
* reg-names
required:
- compatible
- reg
- interrupts
- interrupt-names
- qcom,rproc
additionalProperties: false
examples:
- |
q6v5_wcss: q6v5_wcss@CD00000 {
compatible = "qcom,ipq8074-wcss-pil";
reg = <0xCD00000 0x4040>,
<0x4AB000 0x20>;
reg-names = "qdsp6",
"rmb";
};
wifi0: wifi@c000000 {
compatible = "qcom,ipq8074-wifi";
reg = <0xc000000 0x2000000>;
interrupts = <0 320 1>,
<0 319 1>,
<0 318 1>,
<0 317 1>,
<0 316 1>,
<0 315 1>,
<0 314 1>,
<0 311 1>,
<0 310 1>,
<0 411 1>,
<0 410 1>,
<0 40 1>,
<0 39 1>,
<0 302 1>,
<0 301 1>,
<0 37 1>,
<0 36 1>,
<0 296 1>,
<0 295 1>,
<0 294 1>,
<0 293 1>,
<0 292 1>,
<0 291 1>,
<0 290 1>,
<0 289 1>,
<0 288 1>,
<0 239 1>,
<0 236 1>,
<0 235 1>,
<0 234 1>,
<0 233 1>,
<0 232 1>,
<0 231 1>,
<0 230 1>,
<0 229 1>,
<0 228 1>,
<0 224 1>,
<0 223 1>,
<0 203 1>,
<0 183 1>,
<0 180 1>,
<0 179 1>,
<0 178 1>,
<0 177 1>,
<0 176 1>,
<0 163 1>,
<0 162 1>,
<0 160 1>,
<0 159 1>,
<0 158 1>,
<0 157 1>,
<0 156 1>;
interrupt-names = "misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring";
qcom,rproc = <&q6v5_wcss>;
};

@@ -13644,6 +13644,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath10k/
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@codeaurora.org>
L: ath11k@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath11k/
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org

@@ -62,5 +62,6 @@ source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
source "drivers/net/wireless/ath/wcn36xx/Kconfig"
source "drivers/net/wireless/ath/ath11k/Kconfig"
endif

@@ -7,6 +7,7 @@ obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH11K) += ath11k/
obj-$(CONFIG_ATH_COMMON) += ath.o

@@ -346,6 +346,52 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
return 0;
}
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd *cmd;
u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
u32 txlen;
int ret;
size_t buf_len;
ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
cmd = kzalloc(buf_len, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
while (length) {
txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);
WARN_ON_ONCE(txlen & 3);
cmd->id = __cpu_to_le32(BMI_LZ_DATA);
cmd->lz_data.len = __cpu_to_le32(txlen);
memcpy(cmd->lz_data.payload, buffer, txlen);
ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device\n");
kfree(cmd);
return ret;
}
buffer += txlen;
length -= txlen;
}
kfree(cmd);
return 0;
}
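/* Worked example (illustrative numbers, not from the source): assuming
 * hdrlen = 8 and length = 5000, the loop sends chunks of
 * min(5000, 2048 - 8) = 2040, then 2040, then 920 bytes in three
 * BMI_LZ_DATA exchanges.
 */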
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
@@ -430,7 +476,11 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
if (trailer_len > 0)
memcpy(trailer, buffer + head_len, trailer_len);
ret = ath10k_bmi_lz_data(ar, buffer, head_len);
if (ar->hw_params.bmi_large_size_download)
ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
else
ret = ath10k_bmi_lz_data(ar, buffer, head_len);
if (ret)
return ret;

@@ -45,6 +45,15 @@
sizeof(u32) + \
sizeof(u32))
/* Maximum data size used for large BMI transfers */
#define BMI_MAX_LARGE_DATA_SIZE 2048
/* len = cmd + addr + length */
#define BMI_MAX_LARGE_CMDBUF_SIZE (BMI_MAX_LARGE_DATA_SIZE + \
sizeof(u32) + \
sizeof(u32) + \
sizeof(u32))
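/* Worked example (assuming sizeof(u32) == 4):
 * BMI_MAX_LARGE_CMDBUF_SIZE = 2048 + 4 + 4 + 4 = 2060 bytes
 */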
/* BMI Commands */
enum bmi_cmd_id {
@@ -258,6 +267,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);

@@ -189,6 +189,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
.bmi_large_size_download = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -714,18 +715,6 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
/* Explicitly set fwlog prints to zero as target may turn it on
* based on scratch registers.
*/
ret = ath10k_bmi_read32(ar, hi_option_flag, &param);
if (ret)
return ret;
param |= HI_OPTION_DISABLE_DBGLOG;
ret = ath10k_bmi_write32(ar, hi_option_flag, param);
if (ret)
return ret;
return 0;
}
@@ -3231,6 +3220,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_waitqueue_head(&ar->htt.empty_tx_wq);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
skb_queue_head_init(&ar->htt.rx_indication_head);
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);

@@ -124,6 +124,7 @@ struct ath10k_skb_cb {
struct ath10k_skb_rxcb {
dma_addr_t paddr;
struct hlist_node hlist;
u8 eid;
};
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -1180,6 +1181,7 @@ struct ath10k {
struct {
/* protected by data_lock */
u32 rx_crc_err_drop;
u32 fw_crash_counter;
u32 fw_warm_reset_counter;
u32 fw_cold_reset_counter;

@@ -1094,6 +1094,7 @@ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_rts_good",
"d_tx_power", /* in .5 dbM I think */
"d_rx_crc_err", /* fcs_bad */
"d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */
"d_no_beacon",
"d_tx_mpdus_queued",
"d_tx_msdu_queued",
@@ -1193,6 +1194,7 @@ void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
data[i++] = pdev_stats->rts_good;
data[i++] = pdev_stats->chan_tx_power;
data[i++] = pdev_stats->fcs_bad;
data[i++] = ar->stats.rx_crc_err_drop;
data[i++] = pdev_stats->no_beacons;
data[i++] = pdev_stats->mpdu_enqued;
data[i++] = pdev_stats->msdu_enqued;

@@ -270,7 +270,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@@ -800,8 +800,8 @@ setup:
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status) {
ath10k_warn(ar, "unsupported HTC service id: %d\n",
ep->service_id);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
ep->service_id);
return status;
}
@@ -878,8 +878,8 @@ static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
&ul_pipe_id,
&dl_pipe_id);
if (status) {
ath10k_warn(ar, "unsupported HTC service id: %d\n",
ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
return false;
}

@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/bitfield.h>
struct ath10k;
@@ -39,7 +40,7 @@ struct ath10k;
* 4-byte aligned.
*/
#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 32
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -49,9 +50,27 @@ enum ath10k_htc_tx_flags {
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
#define ATH10K_HTC_FLAG_BUNDLE_MASK GENMASK(7, 4)
/* bits 2-3 carry bits 4-5 of the extended bundle count */
#define ATH10K_HTC_BUNDLE_EXTRA_MASK GENMASK(3, 2)
#define ATH10K_HTC_BUNDLE_EXTRA_SHIFT 4
static inline unsigned int ath10k_htc_get_bundle_count(u8 max_msgs, u8 flags)
{
unsigned int count, extra_count = 0;
count = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, flags);
if (max_msgs > 16)
extra_count = FIELD_GET(ATH10K_HTC_BUNDLE_EXTRA_MASK, flags) <<
ATH10K_HTC_BUNDLE_EXTRA_SHIFT;
return count + extra_count;
}
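/* Illustrative decode (assumed example values, not from the source):
 * with max_msgs = 32 and flags = 0xa4, bits 7-4 give a base count of 10
 * and bits 3-2 give 1, contributing 1 << 4 = 16, so
 * ath10k_htc_get_bundle_count(32, 0xa4) returns 26.
 */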
struct ath10k_htc_hdr {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */

@@ -1869,6 +1869,8 @@ struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
struct sk_buff_head rx_indication_head;
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
@@ -2283,6 +2285,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif

@@ -1285,6 +1285,13 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
status = IEEE80211_SKB_RXCB(skb);
if (!(ar->filter_flags & FIF_FCSFAIL) &&
status->flag & RX_FLAG_FAILED_FCS_CRC) {
ar->stats.rx_crc_err_drop++;
dev_kfree_skb_any(skb);
return;
}
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
@@ -2196,8 +2203,8 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
HTT_RX_IND_MPDU_STATUS_OK &&
mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
ath10k_warn(ar, "MPDU range status: %d\n",
mpdu_ranges->mpdu_range_status);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
mpdu_ranges->mpdu_range_status);
goto err;
}
@@ -2235,8 +2242,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
hdr = (struct ieee80211_hdr *)skb->data;
qos = ieee80211_is_data_qos(hdr->frame_control);
rx_status = IEEE80211_SKB_RXCB(skb);
rx_status->chains |= BIT(0);
memset(rx_status, 0, sizeof(*rx_status));
if (rx->ppdu.combined_rssi == 0) {
/* SDIO firmware does not provide signal */
rx_status->signal = 0;
@@ -2350,7 +2359,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
}
ieee80211_rx_ni(ar->hw, skb);
if (ar->napi.dev)
ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
else
ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
* must not free it.
@@ -3751,14 +3763,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return ath10k_htt_rx_proc_rx_ind_hl(htt,
&resp->rx_ind_hl,
skb,
HTT_RX_PN_CHECK,
HTT_RX_NON_TKIP_MIC);
else
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
} else {
skb_queue_tail(&htt->rx_indication_head, skb);
return false;
}
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
@@ -3948,6 +3958,37 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
return quota;
}
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
struct htt_resp *resp;
struct ath10k_htt *htt = &ar->htt;
struct sk_buff *skb;
bool release;
int quota;
for (quota = 0; quota < budget; quota++) {
skb = skb_dequeue(&htt->rx_indication_head);
if (!skb)
break;
resp = (struct htt_resp *)skb->data;
release = ath10k_htt_rx_proc_rx_ind_hl(htt,
&resp->rx_ind_hl,
skb,
HTT_RX_PN_CHECK,
HTT_RX_NON_TKIP_MIC);
if (release)
dev_kfree_skb_any(skb);
ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
skb_queue_len(&htt->rx_indication_head));
}
return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = &ar->htt;

@@ -613,6 +613,9 @@ struct ath10k_hw_params {
/* target supporting fw download via diag ce */
bool fw_diag_ce_download;
/* target supporting fw download via large size BMI */
bool bmi_large_size_download;
/* need to set uart pin if disable uart print, workaround for a
* firmware bug
*/

@@ -6329,6 +6329,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta && sta->tdls)
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
ar->wmi.peer_param->authorize, 1);
else if (sta && cmd == SET_KEY && (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr,
ar->wmi.peer_param->authorize, 1);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -8908,6 +8911,7 @@ int ath10k_mac_register(struct ath10k *ar)
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
}
ar->hw->vif_data_size = sizeof(struct ath10k_vif);

@@ -279,7 +279,15 @@ static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
/* end = 1 triggers a CRC check on the BDF. If this fails, we
* get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
* willing to use the BDF. For some platforms, all the valid
* released BDFs fail this CRC check, so attempt to detect this
* scenario and treat it as non-fatal.
*/
if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
!(req->end == 1 &&
resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
ath10k_err(ar, "failed to download board data file: %d\n",
resp.resp.error);
ret = -EINVAL;
@@ -635,7 +643,9 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
/* older FW didn't support this request, which is not fatal */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
ret = -EINVAL;
goto out;

@@ -24,6 +24,8 @@
#include "trace.h"
#include "sdio.h"
#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
/* inlined helper functions */
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
@@ -417,6 +419,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_sdio_rx_data *pkt;
struct ath10k_htc_ep *ep;
struct ath10k_skb_rxcb *cb;
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
@@ -462,10 +465,16 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
if (ret)
goto out;
if (!pkt->trailer_only)
ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
else
if (!pkt->trailer_only) {
cb = ATH10K_SKB_RXCB(pkt->skb);
cb->eid = id;
skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
queue_work(ar->workqueue_aux,
&ar_sdio->async_work_rx);
} else {
kfree_skb(pkt->skb);
}
/* The RX complete handler now owns the skb...*/
pkt->skb = NULL;
@@ -484,21 +493,22 @@ out:
return ret;
}
static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
struct ath10k_sdio_rx_data *rx_pkts,
struct ath10k_htc_hdr *htc_hdr,
size_t full_len, size_t act_len,
size_t *bndl_cnt)
static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
struct ath10k_sdio_rx_data *rx_pkts,
struct ath10k_htc_hdr *htc_hdr,
size_t full_len, size_t act_len,
size_t *bndl_cnt)
{
int ret, i;
u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
if (*bndl_cnt > max_msgs) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
max_msgs);
return -ENOMEM;
}
@@ -529,12 +539,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
size_t full_len, act_len;
bool last_in_bundle;
int ret, i;
int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
ath10k_warn(ar,
"the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
n_lookaheads,
ATH10K_SDIO_MAX_RX_MSGS);
ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM;
goto err;
}
@@ -543,10 +552,8 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
last_in_bundle = false;
if (le16_to_cpu(htc_hdr->len) >
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
ath10k_warn(ar,
"payload length %d exceeds max htc length: %zu\n",
if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
le16_to_cpu(htc_hdr->len),
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
ret = -ENOMEM;
@@ -557,36 +564,37 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
ath10k_warn(ar,
"rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
htc_hdr->eid, htc_hdr->flags,
le16_to_cpu(htc_hdr->len));
ret = -EINVAL;
goto err;
}
if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
if (ath10k_htc_get_bundle_count(
ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
/* HTC header indicates that every packet to follow
* has the same padded length so that it can be
* optimally fetched as a full bundle.
*/
size_t bndl_cnt;
ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
&ar_sdio->rx_pkts[i],
htc_hdr,
full_len,
act_len,
&bndl_cnt);
ret = ath10k_sdio_mbox_alloc_bundle(ar,
&ar_sdio->rx_pkts[pkt_cnt],
htc_hdr,
full_len,
act_len,
&bndl_cnt);
if (ret) {
ath10k_warn(ar, "alloc_bundle error %d\n", ret);
ath10k_warn(ar, "failed to allocate a bundle: %d\n",
ret);
goto err;
}
n_lookaheads += bndl_cnt;
i += bndl_cnt;
/*Next buffer will be the last in the bundle */
pkt_cnt += bndl_cnt;
/* next buffer will be the last in the bundle */
last_in_bundle = true;
}
@@ -597,7 +605,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
act_len,
full_len,
last_in_bundle,
@@ -606,9 +614,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
goto err;
}
pkt_cnt++;
}
ar_sdio->n_rx_pkts = i;
ar_sdio->n_rx_pkts = pkt_cnt;
return 0;
@@ -622,59 +632,74 @@ err:
return ret;
}
static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
struct ath10k_sdio_rx_data *pkt)
static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
struct sk_buff *skb = pkt->skb;
struct ath10k_htc_hdr *htc_hdr;
int ret;
ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
skb->data, pkt->alloc_len);
if (ret)
goto out;
/* Update actual length. The original length may be incorrect,
* as the FW will bundle multiple packets as long as their sizes
* fit within the same aligned length (pkt->alloc_len).
*/
htc_hdr = (struct ath10k_htc_hdr *)skb->data;
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
if (pkt->act_len > pkt->alloc_len) {
ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
pkt->act_len, pkt->alloc_len);
ret = -EMSGSIZE;
goto out;
if (ret) {
ar_sdio->n_rx_pkts = 0;
ath10k_sdio_mbox_free_rx_pkt(pkt);
return ret;
}
htc_hdr = (struct ath10k_htc_hdr *)skb->data;
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
skb_put(skb, pkt->act_len);
out:
pkt->status = ret;
return ret;
}
static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_rx_data *pkt;
struct ath10k_htc_hdr *htc_hdr;
int ret, i;
u32 pkt_offset, virt_pkt_len;
virt_pkt_len = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++)
virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
ret = -E2BIG;
goto err;
}
ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
ar_sdio->vsg_buffer, virt_pkt_len);
if (ret) {
ath10k_warn(ar, "failed to read bundle packets: %d", ret);
goto err;
}
pkt_offset = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
ret = ath10k_sdio_mbox_rx_packet(ar,
&ar_sdio->rx_pkts[i]);
if (ret)
goto err;
pkt = &ar_sdio->rx_pkts[i];
htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
pkt_offset += pkt->alloc_len;
}
return 0;
err:
/* Free all packets that were not successfully fetched. */
for (; i < ar_sdio->n_rx_pkts; i++)
for (i = 0; i < ar_sdio->n_rx_pkts; i++)
ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
ar_sdio->n_rx_pkts = 0;
return ret;
}
@@ -717,7 +742,10 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
*/
*done = false;
ret = ath10k_sdio_mbox_rx_fetch(ar);
if (ar_sdio->n_rx_pkts > 1)
ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
else
ret = ath10k_sdio_mbox_rx_fetch(ar);
/* Process fetched packets. This will potentially update
* n_lookaheads depending on if the packets contain lookahead
@@ -1293,6 +1321,31 @@ static void __ath10k_sdio_write_async(struct ath10k *ar,
ath10k_sdio_free_bus_req(ar, req);
}
/* To improve throughput use a workqueue to deliver packets to the HTC
 * layer; this way the SDIO bus is utilised much better.
 */
static void ath10k_rx_indication_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
async_work_rx);
struct ath10k *ar = ar_sdio->ar;
struct ath10k_htc_ep *ep;
struct ath10k_skb_rxcb *cb;
struct sk_buff *skb;
while (true) {
skb = skb_dequeue(&ar_sdio->rx_head);
if (!skb)
break;
cb = ATH10K_SKB_RXCB(skb);
ep = &ar->htc.endpoint[cb->eid];
ep->ep_ops.ep_rx_complete(ar, skb);
}
if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
napi_schedule(&ar->napi);
}
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
@@ -1681,6 +1734,8 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
int ret;
napi_enable(&ar->napi);
/* Sleep 20 ms before HIF interrupts are disabled.
* This will give target plenty of time to process the BMI done
* request before interrupts are disabled.
@@ -1805,13 +1860,16 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar)
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
}
#ifdef CONFIG_PM
static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
return -EOPNOTSUPP;
return 0;
}
static int ath10k_sdio_hif_resume(struct ath10k *ar)
@@ -1961,7 +2019,26 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
*/
static int ath10k_sdio_pm_suspend(struct device *device)
{
return 0;
struct sdio_func *func = dev_to_sdio_func(device);
struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
struct ath10k *ar = ar_sdio->ar;
mmc_pm_flag_t pm_flag, pm_caps;
int ret;
if (!device_may_wakeup(ar->dev))
return 0;
pm_flag = MMC_PM_KEEP_POWER;
ret = sdio_set_host_pm_flags(func, pm_flag);
if (ret) {
pm_caps = sdio_get_host_pm_caps(func);
ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
pm_flag, pm_caps, ret);
return ret;
}
return ret;
}
static int ath10k_sdio_pm_resume(struct device *device)
@@ -1980,6 +2057,20 @@ static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
#endif /* CONFIG_PM_SLEEP */
static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
{
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
int done;
done = ath10k_htt_rx_hl_indication(ar, budget);
ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
if (done < budget)
napi_complete_done(ctx, done);
return done;
}
static int ath10k_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -2005,6 +2096,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
ATH10K_NAPI_BUDGET);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device,
@@ -2020,6 +2114,12 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
if (!ar_sdio->vsg_buffer) {
ret = -ENOMEM;
goto err_core_destroy;
}
ar_sdio->irq_data.irq_en_reg =
devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
GFP_KERNEL);
@@ -2028,7 +2128,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
if (!ar_sdio->bmi_buf) {
ret = -ENOMEM;
goto err_core_destroy;
@@ -2057,6 +2157,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
skb_queue_head_init(&ar_sdio->rx_head);
INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
switch (dev_id_base) {
case QCA_MANUFACTURER_ID_AR6005_BASE:
@@ -2080,6 +2183,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
bus_params.chip_id = 0;
bus_params.hl_msdu_ids = true;
ar->hw->max_mtu = ETH_DATA_LEN;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2106,6 +2211,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
func->num, func->vendor, func->device);
ath10k_core_unregister(ar);
netif_napi_del(&ar->napi);
ath10k_core_destroy(ar);
flush_workqueue(ar_sdio->workqueue);

@@ -89,10 +89,10 @@
* to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
* (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
* (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
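/* Quick check (derived from the macros above): with
 * HTC_HOST_MAX_MSG_PER_RX_BUNDLE = 32, ATH10K_SDIO_MAX_RX_MSGS is
 * 32 * 2 = 64 queued rx messages.
 */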
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
@@ -126,7 +126,6 @@ struct ath10k_sdio_rx_data {
bool part_of_bundle;
bool last_in_bundle;
bool trailer_only;
int status;
};
struct ath10k_sdio_irq_proc_regs {
@@ -138,8 +137,8 @@ struct ath10k_sdio_irq_proc_regs {
u8 rx_lookahead_valid;
u8 host_int_status2;
u8 gmbox_rx_avail;
__le32 rx_lookahead[2];
__le32 rx_gmbox_lookahead_alias[2];
__le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
__le32 int_status_enable;
};
struct ath10k_sdio_irq_enable_regs {
@@ -187,6 +186,9 @@ struct ath10k_sdio {
struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
/* free list of bus requests */
struct list_head bus_req_freeq;
struct sk_buff_head rx_head;
/* protects access to bus_req_freeq */
spinlock_t lock;
@@ -196,6 +198,13 @@ struct ath10k_sdio {
struct ath10k *ar;
struct ath10k_sdio_irq_data irq_data;
/* temporary buffer for SDIO reads.
 * It is allocated at probe time and used to receive bundled packets;
 * the reads for bundled packets are not done in parallel, so the
 * buffer needs no locking.
 */
u8 *vsg_buffer;
/* temporary buffer for BMI requests */
u8 *bmi_buf;
@@ -206,6 +215,8 @@ struct ath10k_sdio {
struct list_head wr_asyncq;
/* protects access to wr_asyncq */
spinlock_t wr_async_lock;
struct work_struct async_work_rx;
};
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)

@@ -1563,13 +1563,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
goto err_core_destroy;
goto err_power_off;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
err_power_off:
ath10k_hw_power_off(ar);
err_free_irq:
ath10k_snoc_free_irq(ar);

@@ -841,7 +841,7 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
const struct wmi_tlv_mgmt_rx_ev *ev;
const u8 *frame;
u32 msdu_len;
int ret;
int ret, i;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -865,6 +865,9 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
arg->phy_mode = ev->phy_mode;
arg->rate = ev->rate;
for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
arg->rssi[i] = ev->rssi[i];
msdu_len = __le32_to_cpu(arg->buf_len);
if (skb->len < (frame - skb->data) + msdu_len) {
@@ -3707,6 +3710,7 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
u16 tlv_len;
size_t len;
void *ptr;
u32 i;
@@ -3764,10 +3768,12 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
sizeof(struct nlo_configured_parameters);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(len);
tlv->len = __cpu_to_le16(tlv_len);
ptr += sizeof(*tlv);
nlo_list = ptr;

@@ -2463,10 +2463,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rx_status;
u32 channel;
u32 phy_mode;
u32 snr;
u32 snr, rssi;
u32 rate;
u16 fc;
int ret;
int ret, i;
ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
if (ret) {
@@ -2525,6 +2525,20 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
status->chains &= ~BIT(i);
rssi = __le32_to_cpu(arg.rssi[i]);
ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, arg.rssi[i]);
if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
status->chains |= BIT(i);
}
}
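/* Illustrative numbers (assuming the default noise floor of -95 dBm):
 * a reported per-chain rssi of 35 yields
 * chain_signal = -95 + 35 = -60 dBm, and the chain bit is set.
 */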
status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;

@@ -6786,6 +6786,7 @@ struct wmi_peer_delete_resp_ev_arg {
struct wmi_mac_addr peer_addr;
};
#define WMI_MGMT_RX_NUM_RSSI 4
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -6794,6 +6795,7 @@ struct wmi_mgmt_rx_ev_arg {
__le32 buf_len;
__le32 status; /* %WMI_RX_STATUS_ */
struct wmi_mgmt_rx_ext_info ext_info;
__le32 rssi[WMI_MGMT_RX_NUM_RSSI];
};
struct wmi_ch_info_ev_arg {

@@ -0,0 +1,35 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
config ATH11K
tristate "Qualcomm Technologies 802.11ax chipset support"
depends on MAC80211 && HAS_DMA
depends on REMOTEPROC
depends on ARCH_QCOM || COMPILE_TEST
select ATH_COMMON
select QCOM_QMI_HELPERS
---help---
This module adds support for the Qualcomm Technologies 802.11ax
family of chipsets.
If you choose to build a module, it'll be called ath11k.
config ATH11K_DEBUG
bool "QCA ath11k debugging"
depends on ATH11K
---help---
Enables debug support
If unsure, say Y to make it easier to debug problems.
config ATH11K_DEBUGFS
bool "QCA ath11k debugfs support"
depends on ATH11K && DEBUG_FS
---help---
Enable ath11k debugfs support
If unsure, say Y to make it easier to debug problems.
config ATH11K_TRACING
bool "ath11k tracing support"
depends on ATH11K && EVENT_TRACING
---help---
Select this to use ath11k tracing infrastructure.

@@ -0,0 +1,26 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
obj-$(CONFIG_ATH11K) += ath11k.o
ath11k-y += core.o \
hal.o \
hal_tx.o \
hal_rx.o \
ahb.o \
wmi.o \
mac.o \
reg.o \
htc.o \
qmi.o \
dp.o \
dp_tx.o \
dp_rx.o \
debug.o \
ce.o \
peer.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o
ath11k-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)

File diff suppressed because it is too large.

@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_AHB_H
#define ATH11K_AHB_H
#include "core.h"
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
struct ath11k_base;
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
return ioread32(ab->mem + offset);
}
static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
iowrite32(value, ab->mem + offset);
}
void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab);
void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab);
int ath11k_ahb_start(struct ath11k_base *ab);
void ath11k_ahb_stop(struct ath11k_base *ab);
int ath11k_ahb_power_up(struct ath11k_base *ab);
void ath11k_ahb_power_down(struct ath11k_base *ab);
int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
int ath11k_ahb_init(void);
void ath11k_ahb_exit(void);
#endif

@@ -0,0 +1,805 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "dp_rx.h"
#include "debug.h"
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE1: target->host HTT + HTC control */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE2: target->host WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE3: host->target WMI (mac0) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 2048,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE5: target->host pktlog */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
},
/* CE6: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE7: host->target WMI (mac1) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE8: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE9: host->target WMI (mac2) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE10: target->host HTT */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE11: Not used */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
};
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
struct ath11k_base *ab = pipe->ab;
struct ath11k_ce_ring *ring = pipe->dest_ring;
struct hal_srng *srng;
unsigned int write_index;
unsigned int nentries_mask = ring->nentries_mask;
u32 *desc;
int ret;
lockdep_assert_held(&ab->ce.ce_lock);
write_index = ring->write_index;
srng = &ab->hal.srng_list[ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
ret = -ENOSPC;
goto exit;
}
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOSPC;
goto exit;
}
ath11k_hal_ce_dst_set_desc(desc, paddr);
ring->skb[write_index] = skb;
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
ring->write_index = write_index;
pipe->rx_buf_needed--;
ret = 0;
exit:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return ret;
}
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
dma_addr_t paddr;
int ret = 0;
if (!(pipe->dest_ring || pipe->status_ring))
return 0;
spin_lock_bh(&ab->ce.ce_lock);
while (pipe->rx_buf_needed) {
skb = dev_alloc_skb(pipe->buf_sz);
if (!skb) {
ret = -ENOMEM;
goto exit;
}
WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, paddr))) {
ath11k_warn(ab, "failed to dma map ce rx buf\n");
dev_kfree_skb_any(skb);
ret = -EIO;
goto exit;
}
ATH11K_SKB_RXCB(skb)->paddr = paddr;
ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
goto exit;
}
}
exit:
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
struct sk_buff **skb, int *nbytes)
{
struct ath11k_base *ab = pipe->ab;
struct hal_srng *srng;
unsigned int sw_index;
unsigned int nentries_mask;
u32 *desc;
int ret = 0;
spin_lock_bh(&ab->ce.ce_lock);
sw_index = pipe->dest_ring->sw_index;
nentries_mask = pipe->dest_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
if (!desc) {
ret = -EIO;
goto err;
}
*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
if (*nbytes == 0) {
ret = -EIO;
goto err;
}
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
pipe->dest_ring->sw_index = sw_index;
pipe->rx_buf_needed++;
err:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
struct sk_buff_head list;
unsigned int nbytes, max_nbytes;
int ret;
__skb_queue_head_init(&list);
while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
}
skb_put(skb, nbytes);
__skb_queue_tail(&list, skb);
}
while ((skb = __skb_dequeue(&list))) {
ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
ret = ath11k_ce_rx_post_pipe(pipe);
if (ret && ret != -ENOSPC) {
ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
pipe->pipe_num, ret);
mod_timer(&ab->rx_replenish_retry,
jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
}
}
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct hal_srng *srng;
unsigned int sw_index;
unsigned int nentries_mask;
struct sk_buff *skb;
u32 *desc;
spin_lock_bh(&ab->ce.ce_lock);
sw_index = pipe->src_ring->sw_index;
nentries_mask = pipe->src_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
desc = ath11k_hal_srng_src_reap_next(ab, srng);
if (!desc) {
skb = ERR_PTR(-EIO);
goto err_unlock;
}
skb = pipe->src_ring->skb[sw_index];
pipe->src_ring->skb[sw_index] = NULL;
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
pipe->src_ring->sw_index = sw_index;
err_unlock:
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return skb;
}
static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
if (!skb)
continue;
dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
static int ath11k_ce_init_ring(struct ath11k_base *ab,
struct ath11k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
{
struct hal_srng_params params = { 0 };
int ret;
params.ring_base_paddr = ce_ring->base_addr_ce_space;
params.ring_base_vaddr = ce_ring->base_addr_owner_space;
params.num_entries = ce_ring->nentries;
switch (type) {
case HAL_CE_SRC:
if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
params.intr_batch_cntr_thres_entries = 1;
break;
case HAL_CE_DST:
params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_timer_thres_us = 1024;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.low_threshold = ce_ring->nentries - 3;
}
break;
case HAL_CE_DST_STATUS:
if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_batch_cntr_thres_entries = 1;
params.intr_timer_thres_us = 0x1000;
}
break;
default:
ath11k_warn(ab, "Invalid CE ring type %d\n", type);
return -EINVAL;
}
/* TODO: Init other params needed by HAL to init the ring */
ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
if (ret < 0) {
ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
ret, ce_id);
return ret;
}
ce_ring->hal_ring_id = ret;
return 0;
}
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
struct ath11k_ce_ring *ce_ring;
dma_addr_t base_addr;
ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
if (!ce_ring)
return ERR_PTR(-ENOMEM);
ce_ring->nentries = nentries;
ce_ring->nentries_mask = nentries - 1;
/* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
ce_ring->base_addr_owner_space_unaligned =
dma_alloc_coherent(ab->dev,
nentries * desc_sz + CE_DESC_RING_ALIGN,
&base_addr, GFP_KERNEL);
if (!ce_ring->base_addr_owner_space_unaligned) {
kfree(ce_ring);
return ERR_PTR(-ENOMEM);
}
ce_ring->base_addr_ce_space_unaligned = base_addr;
ce_ring->base_addr_owner_space = PTR_ALIGN(
ce_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
ce_ring->base_addr_ce_space = ALIGN(
ce_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
return ce_ring;
}
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
int nentries;
int desc_sz;
pipe->attr_flags = attr->flags;
if (attr->src_nentries) {
pipe->send_cb = ath11k_ce_send_done_cb;
nentries = roundup_pow_of_two(attr->src_nentries);
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
pipe->src_ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
if (!pipe->src_ring)
return -ENOMEM;
}
if (attr->dest_nentries) {
pipe->recv_cb = attr->recv_cb;
nentries = roundup_pow_of_two(attr->dest_nentries);
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
pipe->dest_ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
if (!pipe->dest_ring)
return -ENOMEM;
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
pipe->status_ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
if (!pipe->status_ring)
return -ENOMEM;
}
return 0;
}
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
if (pipe->send_cb)
pipe->send_cb(pipe);
if (pipe->recv_cb)
ath11k_ce_recv_process_cb(pipe);
}
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
pipe->send_cb(pipe);
}
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
u16 transfer_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
struct hal_srng *srng;
u32 *desc;
unsigned int write_index, sw_index;
unsigned int nentries_mask;
int ret = 0;
u8 byte_swap_data = 0;
int num_used;
/* Check if some entries could be regained by handling tx completion if
 * the CE has interrupts disabled and the number of used entries is more
 * than the defined usage threshold.
 */
if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
spin_lock_bh(&ab->ce.ce_lock);
write_index = pipe->src_ring->write_index;
sw_index = pipe->src_ring->sw_index;
if (write_index >= sw_index)
num_used = write_index - sw_index;
else
num_used = pipe->src_ring->nentries - sw_index +
write_index;
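/* Illustrative wraparound case (assumed values): nentries = 2048,
 * sw_index = 2040, write_index = 8 -> num_used = 2048 - 2040 + 8 = 16.
 */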
spin_unlock_bh(&ab->ce.ce_lock);
if (num_used > ATH11K_CE_USAGE_THRESHOLD)
ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
}
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
return -ESHUTDOWN;
spin_lock_bh(&ab->ce.ce_lock);
write_index = pipe->src_ring->write_index;
nentries_mask = pipe->src_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
ath11k_hal_srng_access_end(ab, srng);
ret = -ENOBUFS;
goto err_unlock;
}
desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
if (!desc) {
ath11k_hal_srng_access_end(ab, srng);
ret = -ENOBUFS;
goto err_unlock;
}
if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
byte_swap_data = 1;
ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
skb->len, transfer_id, byte_swap_data);
pipe->src_ring->skb[write_index] = skb;
pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
write_index);
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return 0;
err_unlock:
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct ath11k_ce_ring *ring = pipe->dest_ring;
struct sk_buff *skb;
int i;
if (!(ring && pipe->buf_sz))
return;
for (i = 0; i < ring->nentries; i++) {
skb = ring->skb[i];
if (!skb)
continue;
ring->skb[i] = NULL;
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
}
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int pipe_num;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe = &ab->ce.ce_pipe[pipe_num];
ath11k_ce_rx_pipe_cleanup(pipe);
/* Cleanup any src CE's which have interrupts disabled */
ath11k_ce_poll_send_completed(ab, pipe_num);
/* NOTE: Should we also clean up tx buffer in all pipes? */
}
}
void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int i;
int ret;
for (i = 0; i < CE_COUNT; i++) {
pipe = &ab->ce.ce_pipe[i];
ret = ath11k_ce_rx_post_pipe(pipe);
if (ret) {
if (ret == -ENOSPC)
continue;
ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
i, ret);
mod_timer(&ab->rx_replenish_retry,
jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
return;
}
}
}
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
ath11k_ce_rx_post_buf(ab);
}
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int i;
int ret;
for (i = 0; i < CE_COUNT; i++) {
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
HAL_CE_SRC);
if (ret) {
ath11k_warn(ab, "failed to init src ring: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->src_ring->write_index = 0;
pipe->src_ring->sw_index = 0;
}
if (pipe->dest_ring) {
ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
HAL_CE_DST);
if (ret) {
ath11k_warn(ab, "failed to init dest ring: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->rx_buf_needed = pipe->dest_ring->nentries ?
pipe->dest_ring->nentries - 2 : 0;
pipe->dest_ring->write_index = 0;
pipe->dest_ring->sw_index = 0;
}
if (pipe->status_ring) {
ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
HAL_CE_DST_STATUS);
if (ret) {
ath11k_warn(ab, "failed to init dest status ring: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->status_ring->write_index = 0;
pipe->status_ring->sw_index = 0;
}
}
return 0;
}
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int desc_sz;
int i;
for (i = 0; i < CE_COUNT; i++) {
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->src_ring->base_addr_owner_space,
pipe->src_ring->base_addr_ce_space);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
if (pipe->dest_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->dest_ring->base_addr_owner_space,
pipe->dest_ring->base_addr_ce_space);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
if (pipe->status_ring) {
desc_sz =
ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->status_ring->base_addr_owner_space,
pipe->status_ring->base_addr_ce_space);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}
}
}
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int i;
int ret;
const struct ce_attr *attr;
spin_lock_init(&ab->ce.ce_lock);
for (i = 0; i < CE_COUNT; i++) {
attr = &host_ce_config_wlan[i];
pipe = &ab->ce.ce_pipe[i];
pipe->pipe_num = i;
pipe->ab = ab;
pipe->buf_sz = attr->src_sz_max;
ret = ath11k_ce_alloc_pipe(ab, i);
if (ret) {
/* Free any partially successful allocation */
ath11k_ce_free_pipes(ab);
return ret;
}
}
return 0;
}
/* For a big endian host, Copy Engine byte_swap is enabled.
 * When the Copy Engine does the byte_swap, the host needs to byte swap
 * again to get/put buffer content in the correct byte order.
*/
void ath11k_ce_byte_swap(void *mem, u32 len)
{
int i;
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
if (!mem)
return;
for (i = 0; i < (len / 4); i++) {
*(u32 *)mem = swab32(*(u32 *)mem);
mem += 4;
}
}
}
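/* Illustrative sketch (not part of the driver): on a big endian host a
 * caller would run a fetched descriptor through ath11k_ce_byte_swap()
 * before parsing it; on little endian hosts the call compiles away. The
 * 16-byte descriptor size here is hypothetical.
 */
static inline void ath11k_ce_example_fixup_desc(void *desc)
{
ath11k_ce_byte_swap(desc, 16);
}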
int ath11k_ce_get_attr_flags(int ce_id)
{
if (ce_id >= CE_COUNT)
return -EINVAL;
return host_ce_config_wlan[ce_id].flags;
}

View File

@ -0,0 +1,183 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_CE_H
#define ATH11K_CE_H
#define CE_COUNT 12
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR 8
/* Host software's Copy Engine configuration. */
#ifdef __BIG_ENDIAN
#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
#else
#define CE_ATTR_FLAGS 0
#endif
/* Threshold to poll for tx completion in case of Interrupt disabled CE's */
#define ATH11K_CE_USAGE_THRESHOLD 32
void ath11k_ce_byte_swap(void *mem, u32 len);
/*
* Directions for interconnect pipe configuration.
* These definitions may be used during configuration and are shared
* between Host and Target.
*
* Pipe Directions are relative to the Host, so PIPEDIR_IN means
* "coming IN over air through Target to Host" as with a WiFi Rx operation.
* Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
* as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
* Target since things that are "PIPEDIR_OUT" are coming IN to the Target
* over the interconnect.
*/
#define PIPEDIR_NONE 0
#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT 3 /* bidirectional */
#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
/* CE address/mask */
#define CE_HOST_IE_ADDRESS 0x00A1803C
#define CE_HOST_IE_2_ADDRESS 0x00A18040
#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
#define CE_HOST_IE_3_SHIFT 0xC
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
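/* Worked example (illustrative): a ring with 8 entries has a mask of 7,
 * so CE_RING_IDX_INCR(7, 3) yields 4 while CE_RING_IDX_INCR(7, 7) wraps
 * back to 0.
 */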
#define ATH11K_CE_RX_POST_RETRY_JIFFIES 50
struct ath11k_base;
/*
* Establish a mapping between a service/direction and a pipe.
* Configuration information for a Copy Engine pipe and services.
* Passed from Host to Target through QMI message and must be in
* little endian format.
*/
struct service_to_pipe {
__le32 service_id;
__le32 pipedir;
__le32 pipenum;
};
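/* Illustrative sketch (hypothetical values, not the driver's real map):
 * a host-to-target control service routed to pipe 3 would be encoded in
 * little endian as required above, e.g.
 *
 *	static const struct service_to_pipe example_map[] = {
 *		{
 *			.service_id = __cpu_to_le32(0x100),
 *			.pipedir = __cpu_to_le32(PIPEDIR_OUT),
 *			.pipenum = __cpu_to_le32(3),
 *		},
 *	};
 */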
/*
* Configuration information for a Copy Engine pipe.
* Passed from Host to Target through QMI message during startup (one per CE).
*
* NOTE: Structure is shared between Host software and Target firmware!
*/
struct ce_pipe_config {
__le32 pipenum;
__le32 pipedir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
__le32 reserved;
};
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
/*
* Max source send size for this CE.
* This is also the minimum size of a destination buffer.
*/
unsigned int src_sz_max;
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
};
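/* Illustrative sketch (hypothetical entry, not the driver's actual
 * host_ce_config_wlan table): a host->target pipe with 16 source
 * entries, 2048-byte max transfers and no destination ring could be
 * described as:
 *
 *	{
 *		.flags = CE_ATTR_FLAGS,
 *		.src_nentries = 16,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 0,
 *	},
 */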
#define CE_DESC_RING_ALIGN 8
struct ath11k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
/* For dest ring, this is the next index to be processed
* by software after it was/is received into.
*
* For src ring, this is the last descriptor that was sent
* and completion processed by software.
*
* Regardless of src or dest ring, this is an invariant
* (modulo ring size):
* write index >= read index >= sw_index
*/
unsigned int sw_index;
/* cached copy */
unsigned int write_index;
/* Start of DMA-coherent area reserved for descriptors */
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
u32 base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
* Points into reserved DMA-coherent area, above.
*/
/* Host address space */
void *base_addr_owner_space;
/* CE address space */
u32 base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
/* keep last */
struct sk_buff *skb[0];
};
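/* Sketch of how the invariant above is typically used (illustrative
 * helper, assuming power-of-2 nentries): the number of occupied entries
 * is the masked distance between the two indices.
 */
static inline unsigned int
ath11k_ce_ring_example_used(struct ath11k_ce_ring *ring)
{
return (ring->write_index - ring->sw_index) & ring->nentries_mask;
}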
struct ath11k_ce_pipe {
struct ath11k_base *ab;
u16 pipe_num;
unsigned int attr_flags;
unsigned int buf_sz;
unsigned int rx_buf_needed;
void (*send_cb)(struct ath11k_ce_pipe *);
void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
struct tasklet_struct intr_tq;
struct ath11k_ce_ring *src_ring;
struct ath11k_ce_ring *dest_ring;
struct ath11k_ce_ring *status_ring;
};
struct ath11k_ce {
struct ath11k_ce_pipe ce_pipe[CE_COUNT];
/* Protects rings of all ce pipes */
spinlock_t ce_lock;
};
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
void ath11k_ce_rx_replenish_retry(struct timer_list *t);
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
u16 transfer_id);
void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
int ath11k_ce_init_pipes(struct ath11k_base *ab);
int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
void ath11k_ce_free_pipes(struct ath11k_base *ab);
int ath11k_ce_get_attr_flags(int ce_id);
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
#endif

View File

@ -0,0 +1,795 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include "ahb.h"
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
unsigned int ath11k_debug_mask;
module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
static const struct ath11k_hw_params ath11k_hw_params = {
.name = "ipq8074",
.fw = {
.dir = IPQ8074_FW_DIR,
.board_size = IPQ8074_MAX_BOARD_DATA_SZ,
.cal_size = IPQ8074_MAX_CAL_DATA_SZ,
},
};
/* Map from pdev index to hw mac index */
u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx)
{
switch (pdev_idx) {
case 0:
return 0;
case 1:
return 2;
case 2:
return 1;
default:
ath11k_warn(ab, "Invalid pdev idx %d\n", pdev_idx);
return ATH11K_INVALID_HW_MAC_ID;
}
}
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
/* Note: bus is fixed to AHB for now. When other bus types are
 * supported, make this dynamic.
*/
scnprintf(name, name_len,
"bus=ahb,qmi-chip-id=%d,qmi-board-id=%d",
ab->qmi.target.chip_id,
ab->qmi.target.board_id);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name);
return 0;
}
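/* For example (values illustrative), a target reporting chip id 0 and
 * board id 255 makes the function above produce the board name
 * "bus=ahb,qmi-chip-id=0,qmi-board-id=255".
 */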
static const struct firmware *ath11k_fetch_fw_file(struct ath11k_base *ab,
const char *dir,
const char *file)
{
char filename[100];
const struct firmware *fw;
int ret;
if (file == NULL)
return ERR_PTR(-ENOENT);
if (dir == NULL)
dir = ".";
snprintf(filename, sizeof(filename), "%s/%s", dir, file);
ret = firmware_request_nowarn(&fw, filename, ab->dev);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot fw request '%s': %d\n",
filename, ret);
if (ret)
return ERR_PTR(ret);
ath11k_warn(ab, "Downloading BDF: %s, size: %zu\n",
filename, fw->size);
return fw;
}
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
if (!IS_ERR(bd->fw))
release_firmware(bd->fw);
memset(bd, 0, sizeof(*bd));
}
static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
struct ath11k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
int bd_ie_type)
{
const struct ath11k_fw_ie *hdr;
bool name_match_found;
int ret, board_ie_id;
size_t board_ie_len;
const void *board_ie_data;
name_match_found = false;
/* go through ATH11K_BD_IE_BOARD_ elements */
while (buf_len > sizeof(struct ath11k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
board_ie_len = le32_to_cpu(hdr->len);
board_ie_data = hdr->data;
buf_len -= sizeof(*hdr);
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n",
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
switch (board_ie_id) {
case ATH11K_BD_IE_BOARD_NAME:
ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
break;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
break;
name_match_found = true;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot found match for name '%s'",
boardname);
break;
case ATH11K_BD_IE_BOARD_DATA:
if (!name_match_found)
/* no match found */
break;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot found board data for '%s'", boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
default:
ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n",
board_ie_id);
break;
}
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
buf_len -= board_ie_len;
buf += board_ie_len;
}
/* no match found */
ret = -ENOENT;
out:
return ret;
}
static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
struct ath11k_board_data *bd,
const char *boardname)
{
size_t len, magic_len;
const u8 *data;
char *filename = ATH11K_BOARD_API2_FILE;
size_t ie_len;
struct ath11k_fw_ie *hdr;
int ret, ie_id;
if (!bd->fw)
bd->fw = ath11k_fetch_fw_file(ab,
ab->hw_params.fw.dir,
filename);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
data = bd->fw->data;
len = bd->fw->size;
/* magic has extra null byte padded */
magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
if (len < magic_len) {
ath11k_err(ab, "failed to find magic value in %s/%s, file too short: %zu\n",
ab->hw_params.fw.dir, filename, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
ath11k_err(ab, "found invalid board magic\n");
ret = -EINVAL;
goto err;
}
/* magic is padded to 4 bytes */
magic_len = ALIGN(magic_len, 4);
if (len < magic_len) {
ath11k_err(ab, "failed: %s/%s too small to contain board data, len: %zu\n",
ab->hw_params.fw.dir, filename, len);
ret = -EINVAL;
goto err;
}
data += magic_len;
len -= magic_len;
while (len > sizeof(struct ath11k_fw_ie)) {
hdr = (struct ath11k_fw_ie *)data;
ie_id = le32_to_cpu(hdr->id);
ie_len = le32_to_cpu(hdr->len);
len -= sizeof(*hdr);
data = hdr->data;
if (len < ALIGN(ie_len, 4)) {
ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
ret = -EINVAL;
goto err;
}
switch (ie_id) {
case ATH11K_BD_IE_BOARD:
ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
ATH11K_BD_IE_BOARD);
if (ret == -ENOENT)
/* no match found, continue */
break;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
len -= ie_len;
data += ie_len;
}
out:
if (!bd->data || !bd->len) {
ath11k_err(ab,
"failed to fetch board data for %s from %s/%s\n",
boardname, ab->hw_params.fw.dir, filename);
ret = -ENODATA;
goto err;
}
return 0;
err:
ath11k_core_free_bdf(ab, bd);
return ret;
}
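/* Sketch of the board-2.bin layout the parser above walks (inferred
 * from the code, not a normative spec):
 *
 *	| ATH11K_BOARD_MAGIC plus NUL, padded to 4 bytes        |
 *	| ie_id = ATH11K_BD_IE_BOARD | ie_len | payload, padded |
 *	|   nested: ATH11K_BD_IE_BOARD_NAME | len | name string |
 *	|   nested: ATH11K_BD_IE_BOARD_DATA | len | board blob  |
 *
 * Matching stops at the first ATH11K_BD_IE_BOARD_DATA that follows a
 * BOARD_NAME IE equal to the generated board name.
 */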
static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
struct ath11k_board_data *bd)
{
bd->fw = ath11k_fetch_fw_file(ab,
ab->hw_params.fw.dir,
ATH11K_DEFAULT_BOARD_FILE);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
bd->data = bd->fw->data;
bd->len = bd->fw->size;
return 0;
}
#define BOARD_NAME_SIZE 100
int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
char boardname[BOARD_NAME_SIZE];
int ret;
ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
if (ret) {
ath11k_err(ab, "failed to create board name: %d", ret);
return ret;
}
ab->bd_api = 2;
ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname);
if (!ret)
goto success;
ab->bd_api = 1;
ret = ath11k_core_fetch_board_data_api_1(ab, bd);
if (ret) {
ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
ab->hw_params.fw.dir);
return ret;
}
success:
ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api);
return 0;
}
static void ath11k_core_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_qmi_firmware_stop(ab);
ath11k_ahb_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
/* De-Init of components as needed */
}
static int ath11k_core_soc_create(struct ath11k_base *ab)
{
int ret;
ret = ath11k_qmi_init_service(ab);
if (ret) {
ath11k_err(ab, "failed to initialize qmi :%d\n", ret);
return ret;
}
ret = ath11k_debug_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create ath11k debugfs\n");
goto err_qmi_deinit;
}
ret = ath11k_ahb_power_up(ab);
if (ret) {
ath11k_err(ab, "failed to power up :%d\n", ret);
goto err_debugfs_reg;
}
return 0;
err_debugfs_reg:
ath11k_debug_soc_destroy(ab);
err_qmi_deinit:
ath11k_qmi_deinit_service(ab);
return ret;
}
static void ath11k_core_soc_destroy(struct ath11k_base *ab)
{
ath11k_debug_soc_destroy(ab);
ath11k_dp_free(ab);
ath11k_reg_free(ab);
ath11k_qmi_deinit_service(ab);
}
static int ath11k_core_pdev_create(struct ath11k_base *ab)
{
int ret;
ret = ath11k_debug_pdev_create(ab);
if (ret) {
ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
return ret;
}
ret = ath11k_mac_register(ab);
if (ret) {
ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
goto err_pdev_debug;
}
ret = ath11k_dp_pdev_alloc(ab);
if (ret) {
ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
goto err_mac_unregister;
}
return 0;
err_mac_unregister:
ath11k_mac_unregister(ab);
err_pdev_debug:
ath11k_debug_pdev_destroy(ab);
return ret;
}
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_mac_unregister(ab);
ath11k_ahb_ext_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debug_pdev_destroy(ab);
}
static int ath11k_core_start(struct ath11k_base *ab,
enum ath11k_firmware_mode mode)
{
int ret;
ret = ath11k_qmi_firmware_start(ab, mode);
if (ret) {
ath11k_err(ab, "failed to attach wmi: %d\n", ret);
return ret;
}
ret = ath11k_wmi_attach(ab);
if (ret) {
ath11k_err(ab, "failed to attach wmi: %d\n", ret);
goto err_firmware_stop;
}
ret = ath11k_htc_init(ab);
if (ret) {
ath11k_err(ab, "failed to init htc: %d\n", ret);
goto err_wmi_detach;
}
ret = ath11k_ahb_start(ab);
if (ret) {
ath11k_err(ab, "failed to start HIF: %d\n", ret);
goto err_wmi_detach;
}
ret = ath11k_htc_wait_target(&ab->htc);
if (ret) {
ath11k_err(ab, "failed to connect to HTC: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_dp_htt_connect(&ab->dp);
if (ret) {
ath11k_err(ab, "failed to connect to HTT: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_wmi_connect(ab);
if (ret) {
ath11k_err(ab, "failed to connect wmi: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_htc_start(&ab->htc);
if (ret) {
ath11k_err(ab, "failed to start HTC: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_wmi_wait_for_service_ready(ab);
if (ret) {
ath11k_err(ab, "failed to receive wmi service ready event: %d\n",
ret);
goto err_hif_stop;
}
ret = ath11k_mac_allocate(ab);
if (ret) {
ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n",
ret);
goto err_hif_stop;
}
ath11k_dp_pdev_pre_alloc(ab);
ret = ath11k_dp_pdev_reo_setup(ab);
if (ret) {
ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
}
ret = ath11k_wmi_cmd_init(ab);
if (ret) {
ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret);
goto err_reo_cleanup;
}
ret = ath11k_wmi_wait_for_unified_ready(ab);
if (ret) {
ath11k_err(ab, "failed to receive wmi unified ready event: %d\n",
ret);
goto err_reo_cleanup;
}
ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab);
if (ret) {
ath11k_err(ab, "failed to send htt version request message: %d\n",
ret);
goto err_reo_cleanup;
}
return 0;
err_reo_cleanup:
ath11k_dp_pdev_reo_cleanup(ab);
err_mac_destroy:
ath11k_mac_destroy(ab);
err_hif_stop:
ath11k_ahb_stop(ab);
err_wmi_detach:
ath11k_wmi_detach(ab);
err_firmware_stop:
ath11k_qmi_firmware_stop(ab);
return ret;
}
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
ret = ath11k_ce_init_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to initialize CE: %d\n", ret);
return ret;
}
ret = ath11k_dp_alloc(ab);
if (ret) {
ath11k_err(ab, "failed to init DP: %d\n", ret);
return ret;
}
mutex_lock(&ab->core_lock);
ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath11k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
ret = ath11k_core_pdev_create(ab);
if (ret) {
ath11k_err(ab, "failed to create pdev core: %d\n", ret);
goto err_core_stop;
}
ath11k_ahb_ext_irq_enable(ab);
mutex_unlock(&ab->core_lock);
return 0;
err_core_stop:
ath11k_core_stop(ab);
ath11k_mac_destroy(ab);
err_dp_free:
ath11k_dp_free(ab);
mutex_unlock(&ab->core_lock);
return ret;
}
static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
{
int ret;
mutex_lock(&ab->core_lock);
ath11k_ahb_ext_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_ahb_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
ath11k_dp_free(ab);
ath11k_hal_srng_deinit(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
ret = ath11k_hal_srng_init(ab);
if (ret)
return ret;
clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);
if (ret)
goto err_hal_srng_deinit;
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
return 0;
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
return ret;
}
void ath11k_core_halt(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
ar->num_created_vdevs = 0;
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
INIT_LIST_HEAD(&ar->arvifs);
idr_init(&ar->txmgmt_idr);
}
static void ath11k_core_restart(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i, ret = 0;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
spin_unlock_bh(&ab->base_lock);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH11K_STATE_OFF)
continue;
ieee80211_stop_queues(ar->hw);
ath11k_mac_drain_tx(ar);
complete(&ar->scan.started);
complete(&ar->scan.completed);
complete(&ar->peer_assoc_done);
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
complete(&ar->bss_survey_done);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
ath11k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
}
wake_up(&ab->wmi_sc.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
ret = ath11k_core_reconfigure_on_crash(ab);
if (ret) {
ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
return;
}
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH11K_STATE_OFF)
continue;
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
case ATH11K_STATE_ON:
ar->state = ATH11K_STATE_RESTARTING;
ath11k_core_halt(ar);
ieee80211_restart_hw(ar->hw);
break;
case ATH11K_STATE_OFF:
ath11k_warn(ab,
"cannot restart radio %d that hasn't been started\n",
i);
break;
case ATH11K_STATE_RESTARTING:
break;
case ATH11K_STATE_RESTARTED:
ar->state = ATH11K_STATE_WEDGED;
/* fall through */
case ATH11K_STATE_WEDGED:
ath11k_warn(ab,
"device is wedged, will not restart radio %d\n", i);
break;
}
mutex_unlock(&ar->conf_mutex);
}
complete(&ab->driver_recovery);
}
int ath11k_core_init(struct ath11k_base *ab)
{
struct device *dev = ab->dev;
struct rproc *prproc;
phandle rproc_phandle;
int ret;
if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
ath11k_err(ab, "failed to get q6_rproc handle\n");
return -ENOENT;
}
prproc = rproc_get_by_phandle(rproc_phandle);
if (!prproc) {
ath11k_err(ab, "failed to get rproc\n");
return -EINVAL;
}
ab->tgt_rproc = prproc;
ab->hw_params = ath11k_hw_params;
ret = ath11k_core_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create soc core: %d\n", ret);
return ret;
}
return 0;
}
void ath11k_core_deinit(struct ath11k_base *ab)
{
mutex_lock(&ab->core_lock);
ath11k_core_pdev_destroy(ab);
ath11k_core_stop(ab);
mutex_unlock(&ab->core_lock);
ath11k_ahb_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
}
void ath11k_core_free(struct ath11k_base *ab)
{
kfree(ab);
}
struct ath11k_base *ath11k_core_alloc(struct device *dev)
{
struct ath11k_base *ab;
ab = kzalloc(sizeof(*ab), GFP_KERNEL);
if (!ab)
return NULL;
init_completion(&ab->driver_recovery);
ab->workqueue = create_singlethread_workqueue("ath11k_wq");
if (!ab->workqueue)
goto err_sc_free;
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_sc.tx_credits_wq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
ab->dev = dev;
return ab;
err_sc_free:
kfree(ab);
return NULL;
}
static int __init ath11k_init(void)
{
int ret;
ret = ath11k_ahb_init();
if (ret)
printk(KERN_ERR "failed to register ath11k ahb driver: %d\n",
ret);
return ret;
}
module_init(ath11k_init);
static void __exit ath11k_exit(void)
{
ath11k_ahb_exit();
}
module_exit(ath11k_exit);
MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -0,0 +1,826 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_CORE_H
#define ATH11K_CORE_H
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitfield.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
#include "hal.h"
#include "dp.h"
#include "ce.h"
#include "mac.h"
#include "hw.h"
#include "hal_rx.h"
#include "reg.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
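/* Worked example (hypothetical field definitions): given
 *	#define EXAMPLE_FIELD_LSB	4
 *	#define EXAMPLE_FIELD_MASK	0xf0
 * SM(5, EXAMPLE_FIELD) expands to ((5 << 4) & 0xf0), i.e. 0x50.
 */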
#define ATH11K_TX_MGMT_NUM_PENDING_MAX 512
#define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
/* Pending management packets threshold for dropping probe responses */
#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
#define ATH11K_INVALID_HW_MAC_ID 0xFF
enum ath11k_supported_bw {
ATH11K_BW_20 = 0,
ATH11K_BW_40 = 1,
ATH11K_BW_80 = 2,
ATH11K_BW_160 = 3,
};
enum wme_ac {
WME_AC_BE,
WME_AC_BK,
WME_AC_VI,
WME_AC_VO,
WME_NUM_AC
};
#define ATH11K_HT_MCS_MAX 7
#define ATH11K_VHT_MCS_MAX 9
#define ATH11K_HE_MCS_MAX 11
static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
{
return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
((tid == 1) || (tid == 2)) ? WME_AC_BK :
((tid == 4) || (tid == 5)) ? WME_AC_VI :
WME_AC_VO);
}
struct ath11k_skb_cb {
dma_addr_t paddr;
u8 eid;
struct ath11k *ar;
struct ieee80211_vif *vif;
} __packed;
struct ath11k_skb_rxcb {
dma_addr_t paddr;
bool is_first_msdu;
bool is_last_msdu;
bool is_continuation;
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
u8 mac_id;
u8 unmapped;
};
enum ath11k_hw_rev {
ATH11K_HW_IPQ8074,
};
enum ath11k_firmware_mode {
/* the default mode, standard 802.11 functionality */
ATH11K_FIRMWARE_MODE_NORMAL,
/* factory tests etc */
ATH11K_FIRMWARE_MODE_FTM,
};
#define ATH11K_IRQ_NUM_MAX 52
#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
#define ATH11K_EXT_IRQ_NUM_MAX 16
extern const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
extern const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
extern const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
extern const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
extern const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
extern const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
extern const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
extern const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
struct ath11k_ext_irq_grp {
struct ath11k_base *ab;
u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
u32 num_irq;
u32 grp_id;
struct napi_struct napi;
struct net_device napi_ndev;
/* Queue of pending packets; not expected to be accessed concurrently,
 * which avoids locking overhead.
*/
struct sk_buff_head pending_q;
};
#define HEHANDLE_CAP_PHYINFO_SIZE 3
#define HECAP_PHYINFO_SIZE 9
#define HECAP_MACINFO_SIZE 5
#define HECAP_TXRX_MCS_NSS_SIZE 2
#define HECAP_PPET16_PPET8_MAX_SIZE 25
#define HE_PPET16_PPET8_SIZE 8
/* 802.11ax PPE (PPDU packet Extension) threshold */
struct he_ppe_threshold {
u32 numss_m1;
u32 ru_mask;
u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
};
struct ath11k_he {
u8 hecap_macinfo[HECAP_MACINFO_SIZE];
u32 hecap_rxmcsnssmap;
u32 hecap_txmcsnssmap;
u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
struct he_ppe_threshold hecap_ppet;
u32 heop_param;
};
#define MAX_RADIOS 3
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
WMI_HOST_TP_SCALE_25 = 2,
WMI_HOST_TP_SCALE_12 = 3,
WMI_HOST_TP_SCALE_MIN = 4,
WMI_HOST_TP_SCALE_SIZE = 5,
};
enum ath11k_scan_state {
ATH11K_SCAN_IDLE,
ATH11K_SCAN_STARTING,
ATH11K_SCAN_RUNNING,
ATH11K_SCAN_ABORTING,
};
enum ath11k_dev_flags {
ATH11K_CAC_RUNNING,
ATH11K_FLAG_CORE_REGISTERED,
ATH11K_FLAG_CRASH_FLUSH,
ATH11K_FLAG_RAW_MODE,
ATH11K_FLAG_HW_CRYPTO_DISABLED,
ATH11K_FLAG_BTCOEX,
ATH11K_FLAG_RECOVERY,
ATH11K_FLAG_UNREGISTERING,
ATH11K_FLAG_REGISTERED,
};
enum ath11k_monitor_flags {
ATH11K_FLAG_MONITOR_ENABLED,
};
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
u16 ast_hash;
u16 tcl_metadata;
u8 hal_addr_search_flags;
u8 search_type;
struct ath11k *ar;
struct ieee80211_vif *vif;
u16 tx_seq_no;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
union {
struct {
u32 uapsd;
} sta;
struct {
/* 127 stations; wmi limit */
u8 tim_bitmap[16];
u8 tim_len;
u32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
bool hidden_ssid;
/* P2P_IE with NoA attribute for P2P_GO case */
u32 noa_len;
u8 *noa_data;
} ap;
} u;
bool is_started;
bool is_up;
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
};
struct ath11k_vif_iter {
u32 vdev_id;
struct ath11k_vif *arvif;
};
struct ath11k_rx_peer_stats {
u64 num_msdu;
u64 num_mpdu_fcs_ok;
u64 num_mpdu_fcs_err;
u64 tcp_msdu_count;
u64 udp_msdu_count;
u64 other_msdu_count;
u64 ampdu_msdu_count;
u64 non_ampdu_msdu_count;
u64 stbc_count;
u64 beamformed_count;
u64 mcs_count[HAL_RX_MAX_MCS + 1];
u64 nss_count[HAL_RX_MAX_NSS];
u64 bw_count[HAL_RX_BW_MAX];
u64 gi_count[HAL_RX_GI_MAX];
u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
u64 tid_count[IEEE80211_NUM_TIDS + 1];
u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
u64 rx_duration;
};
#define ATH11K_HE_MCS_NUM 12
#define ATH11K_VHT_MCS_NUM 10
#define ATH11K_BW_NUM 4
#define ATH11K_NSS_NUM 4
#define ATH11K_LEGACY_NUM 12
#define ATH11K_GI_NUM 4
#define ATH11K_HT_MCS_NUM 32
enum ath11k_pkt_rx_err {
ATH11K_PKT_RX_ERR_FCS,
ATH11K_PKT_RX_ERR_TKIP,
ATH11K_PKT_RX_ERR_CRYPT,
ATH11K_PKT_RX_ERR_PEER_IDX_INVAL,
ATH11K_PKT_RX_ERR_MAX,
};
enum ath11k_ampdu_subfrm_num {
ATH11K_AMPDU_SUBFRM_NUM_10,
ATH11K_AMPDU_SUBFRM_NUM_20,
ATH11K_AMPDU_SUBFRM_NUM_30,
ATH11K_AMPDU_SUBFRM_NUM_40,
ATH11K_AMPDU_SUBFRM_NUM_50,
ATH11K_AMPDU_SUBFRM_NUM_60,
ATH11K_AMPDU_SUBFRM_NUM_MORE,
ATH11K_AMPDU_SUBFRM_NUM_MAX,
};
enum ath11k_amsdu_subfrm_num {
ATH11K_AMSDU_SUBFRM_NUM_1,
ATH11K_AMSDU_SUBFRM_NUM_2,
ATH11K_AMSDU_SUBFRM_NUM_3,
ATH11K_AMSDU_SUBFRM_NUM_4,
ATH11K_AMSDU_SUBFRM_NUM_MORE,
ATH11K_AMSDU_SUBFRM_NUM_MAX,
};
enum ath11k_counter_type {
ATH11K_COUNTER_TYPE_BYTES,
ATH11K_COUNTER_TYPE_PKTS,
ATH11K_COUNTER_TYPE_MAX,
};
enum ath11k_stats_type {
ATH11K_STATS_TYPE_SUCC,
ATH11K_STATS_TYPE_FAIL,
ATH11K_STATS_TYPE_RETRY,
ATH11K_STATS_TYPE_AMPDU,
ATH11K_STATS_TYPE_MAX,
};
struct ath11k_htt_data_stats {
u64 legacy[ATH11K_COUNTER_TYPE_MAX][ATH11K_LEGACY_NUM];
u64 ht[ATH11K_COUNTER_TYPE_MAX][ATH11K_HT_MCS_NUM];
u64 vht[ATH11K_COUNTER_TYPE_MAX][ATH11K_VHT_MCS_NUM];
u64 he[ATH11K_COUNTER_TYPE_MAX][ATH11K_HE_MCS_NUM];
u64 bw[ATH11K_COUNTER_TYPE_MAX][ATH11K_BW_NUM];
u64 nss[ATH11K_COUNTER_TYPE_MAX][ATH11K_NSS_NUM];
u64 gi[ATH11K_COUNTER_TYPE_MAX][ATH11K_GI_NUM];
};
struct ath11k_htt_tx_stats {
struct ath11k_htt_data_stats stats[ATH11K_STATS_TYPE_MAX];
u64 tx_duration;
u64 ba_fails;
u64 ack_fails;
};
struct ath11k_per_ppdu_tx_stats {
u16 succ_pkts;
u16 failed_pkts;
u16 retry_pkts;
u32 succ_bytes;
u32 failed_bytes;
u32 retry_bytes;
};
struct ath11k_sta {
struct ath11k_vif *arvif;
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
u32 nss;
u32 smps;
struct work_struct update_wk;
struct ieee80211_tx_info tx_info;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
struct ath11k_htt_tx_stats *tx_stats;
struct ath11k_rx_peer_stats *rx_stats;
};
#define ATH11K_NUM_CHANS 41
#define ATH11K_MAX_5G_CHAN 173
enum ath11k_state {
ATH11K_STATE_OFF,
ATH11K_STATE_ON,
ATH11K_STATE_RESTARTING,
ATH11K_STATE_RESTARTED,
ATH11K_STATE_WEDGED,
/* Add other states as required */
};
/* Antenna noise floor */
#define ATH11K_DEFAULT_NOISE_FLOOR -95
struct ath11k_fw_stats {
struct dentry *debugfs_fwstats;
u32 pdev_id;
u32 stats_id;
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
};
struct ath11k_dbg_htt_stats {
u8 type;
u8 reset;
struct debug_htt_stats_req *stats_req;
/* protects shared stats req buffer */
spinlock_t lock;
};
struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
u32 extd_tx_stats;
struct ath11k_fw_stats fw_stats;
struct completion fw_stats_complete;
bool fw_stats_done;
u32 extd_rx_stats;
u32 pktlog_filter;
u32 pktlog_mode;
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
};
struct ath11k_per_peer_tx_stats {
u32 succ_bytes;
u32 retry_bytes;
u32 failed_bytes;
u16 succ_pkts;
u16 retry_pkts;
u16 failed_pkts;
u32 duration;
u8 ba_fails;
bool is_ampdu;
};
#define ATH11K_FLUSH_TIMEOUT (5 * HZ)
struct ath11k_vdev_stop_status {
bool stop_in_progress;
u32 vdev_id;
};
struct ath11k {
struct ath11k_base *ab;
struct ath11k_pdev *pdev;
struct ieee80211_hw *hw;
struct ieee80211_ops *ops;
struct ath11k_pdev_wmi *wmi;
struct ath11k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
u32 ht_cap_info;
u32 vht_cap_info;
struct ath11k_he ar_he;
enum ath11k_state state;
struct {
struct completion started;
struct completion completed;
struct completion on_channel;
struct delayed_work timeout;
enum ath11k_scan_state state;
bool is_roc;
int vdev_id;
int roc_freq;
bool roc_notify;
} scan;
struct {
struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
struct ieee80211_sband_iftype_data
iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
} mac;
unsigned long dev_flags;
unsigned int filter_flags;
unsigned long monitor_flags;
u32 min_tx_power;
u32 max_tx_power;
u32 txpower_limit_2g;
u32 txpower_limit_5g;
u32 txpower_scale;
u32 power_scale;
u32 chan_tx_pwr;
u32 num_stations;
u32 max_num_stations;
bool monitor_present;
/* To synchronize concurrent synchronous mac80211 callback operations,
* concurrent debugfs configuration and concurrent FW statistics events.
*/
struct mutex conf_mutex;
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
* vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
* channel context data, survey info, test mode data.
*/
spinlock_t data_lock;
struct list_head arvifs;
/* should never be NULL; needed for regular htt rx */
struct ieee80211_channel *rx_channel;
/* valid during scan; needed for mgmt rx during scan */
struct ieee80211_channel *scan_channel;
u8 cfg_tx_chainmask;
u8 cfg_rx_chainmask;
u8 num_rx_chains;
u8 num_tx_chains;
/* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
u8 pdev_idx;
u8 lmac_id;
struct completion peer_assoc_done;
int install_key_status;
struct completion install_key_done;
int last_wmi_vdev_start_status;
struct ath11k_vdev_stop_status vdev_stop_status;
struct completion vdev_setup_done;
int num_peers;
int max_num_peers;
u32 num_started_vdevs;
u32 num_created_vdevs;
struct idr txmgmt_idr;
/* protects txmgmt_idr data */
spinlock_t txmgmt_idr_lock;
atomic_t num_pending_mgmt_tx;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock
*/
u32 survey_last_rx_clear_count;
u32 survey_last_cycle_count;
/* Channel info events are expected to come in pairs without and with
* COMPLETE flag set respectively for each channel visit during scan.
*
* However there are deviations from this rule. This flag is used to
* avoid reporting garbage data.
*/
bool ch_info_can_report_survey;
struct survey_info survey[ATH11K_NUM_CHANS];
struct completion bss_survey_done;
struct work_struct regd_update_work;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
struct ath11k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
struct ath11k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
u32 cached_ppdu_id;
#ifdef CONFIG_ATH11K_DEBUGFS
struct ath11k_debug debug;
#endif
bool dfs_block_radar_events;
};
struct ath11k_band_cap {
u32 max_bw_supported;
u32 ht_cap_info;
u32 he_cap_info[2];
u32 he_mcs;
u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
struct ath11k_ppe_threshold he_ppet;
};
struct ath11k_pdev_cap {
u32 supported_bands;
u32 ampdu_density;
u32 vht_cap;
u32 vht_mcs;
u32 he_mcs;
u32 tx_chain_mask;
u32 rx_chain_mask;
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath11k_band_cap band[NUM_NL80211_BANDS];
};
struct ath11k_pdev {
struct ath11k *ar;
u32 pdev_id;
struct ath11k_pdev_cap cap;
u8 mac_addr[ETH_ALEN];
};
struct ath11k_board_data {
const struct firmware *fw;
const void *data;
size_t len;
};
/* IPQ8074 HW channel counters frequency value in hertz */
#define IPQ8074_CC_FREQ_HERTZ 320000
struct ath11k_soc_dp_rx_stats {
u32 err_ring_pkts;
u32 invalid_rbm;
u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
u32 hal_reo_error[DP_REO_DST_RING_MAX];
};
/* Master structure to hold the hw data which may be used in core module */
struct ath11k_base {
enum ath11k_hw_rev hw_rev;
struct platform_device *pdev;
struct device *dev;
struct ath11k_qmi qmi;
struct ath11k_wmi_base wmi_sc;
struct completion fw_ready;
struct rproc *tgt_rproc;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
struct ath11k_htc htc;
struct ath11k_dp dp;
void __iomem *mem;
unsigned long mem_len;
const struct ath11k_hif_ops *hif_ops;
struct ath11k_ce ce;
struct timer_list rx_replenish_retry;
struct ath11k_hal hal;
/* To synchronize core_start/core_stop */
struct mutex core_lock;
/* Protects data like peers */
spinlock_t base_lock;
struct ath11k_pdev pdevs[MAX_RADIOS];
struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
u8 mac_addr[ETH_ALEN];
bool wmi_ready;
u32 wlan_init_status;
int irq_num[ATH11K_IRQ_NUM_MAX];
struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
struct napi_struct *napi;
struct ath11k_targ_cap target_caps;
u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
bool pdevs_macaddr_valid;
int bd_api;
struct ath11k_hw_params hw_params;
const struct firmware *cal_file;
/* Below regd's are protected by ab->data_lock */
/* This is the regd set for every radio
* by the firmware during initialization
*/
struct ieee80211_regdomain *default_regd[MAX_RADIOS];
/* This regd is set during dynamic country setting
* This may or may not be used during the runtime
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
/* Current DFS Regulatory */
enum ath11k_dfs_region dfs_region;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_soc;
struct dentry *debugfs_ath11k;
#endif
struct ath11k_soc_dp_rx_stats soc_stats;
unsigned long dev_flags;
struct completion driver_recovery;
struct workqueue_struct *workqueue;
struct work_struct restart_work;
struct {
/* protected by data_lock */
u32 fw_crash_counter;
} stats;
};
struct ath11k_fw_stats_pdev {
struct list_head list;
/* PDEV stats */
s32 ch_noise_floor;
/* Cycles spent transmitting frames */
u32 tx_frame_count;
/* Cycles spent receiving frames */
u32 rx_frame_count;
/* Total channel busy time, evidently */
u32 rx_clear_count;
/* Total on-channel time */
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
u32 rts_bad;
u32 rts_good;
u32 fcs_bad;
u32 no_beacons;
u32 mib_int_count;
/* PDEV TX stats */
/* Num HTT cookies queued to dispatch list */
s32 comp_queued;
/* Num HTT cookies dispatched */
s32 comp_delivered;
/* Num MSDU queued to WAL */
s32 msdu_enqued;
/* Num MPDU queue to WAL */
s32 mpdu_enqued;
/* Num MSDUs dropped by WMM limit */
s32 wmm_drop;
/* Num Local frames queued */
s32 local_enqued;
/* Num Local frames done */
s32 local_freed;
/* Num queued to HW */
s32 hw_queued;
/* Num PPDU reaped from HW */
s32 hw_reaped;
/* Num underruns */
s32 underrun;
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
/* Num MPDUs requed by SW */
s32 mpdus_requed;
/* excessive retries */
u32 tx_ko;
/* data hw rate code */
u32 data_rc;
/* Scheduler self triggers */
u32 self_triggers;
/* frames dropped due to excessive sw retries */
u32 sw_retry_failure;
/* illegal rate phy errors */
u32 illgl_rate_phy_err;
/* wal pdev continuous xretry */
u32 pdev_cont_xretry;
/* wal pdev tx timeouts */
u32 pdev_tx_timeout;
/* wal pdev resets */
u32 pdev_resets;
/* frames dropped due to non-availability of stateless TIDs */
u32 stateless_tid_alloc_failure;
/* PhY/BB underrun */
u32 phy_underrun;
/* MPDU is more than txop limit */
u32 txop_ovf;
/* PDEV RX stats */
/* Cnts any change in ring routing mid-ppdu */
s32 mid_ppdu_route_change;
/* Total number of statuses processed */
s32 status_rcvd;
/* Extra frags on rings 0-3 */
s32 r0_frags;
s32 r1_frags;
s32 r2_frags;
s32 r3_frags;
/* MSDUs / MPDUs delivered to HTT */
s32 htt_msdus;
s32 htt_mpdus;
/* MSDUs / MPDUs delivered to local stack */
s32 loc_msdus;
s32 loc_mpdus;
/* AMSDUs that have more MSDUs than the status ring size */
s32 oversize_amsdu;
/* Number of PHY errors */
s32 phy_errs;
/* Number of PHY errors drops */
s32 phy_err_drop;
/* Number of mpdu errors - FCS, MIC, ENC etc. */
s32 mpdu_errs;
};
struct ath11k_fw_stats_vdev {
struct list_head list;
u32 vdev_id;
u32 beacon_snr;
u32 data_snr;
u32 num_tx_frames[WLAN_MAX_AC];
u32 num_rx_frames;
u32 num_tx_frames_retries[WLAN_MAX_AC];
u32 num_tx_frames_failures[WLAN_MAX_AC];
u32 num_rts_fail;
u32 num_rts_success;
u32 num_rx_err;
u32 num_rx_discard;
u32 num_tx_not_acked;
u32 tx_rate_history[MAX_TX_RATE_VALUES];
u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
};
struct ath11k_fw_stats_bcn {
struct list_head list;
u32 vdev_id;
u32 tx_bcn_succ_cnt;
u32 tx_bcn_outage_cnt;
};
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash);
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
void ath11k_core_deinit(struct ath11k_base *ath11k);
struct ath11k_base *ath11k_core_alloc(struct device *dev);
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
void ath11k_core_halt(struct ath11k *ar);
u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx);
static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
{
switch (state) {
case ATH11K_SCAN_IDLE:
return "idle";
case ATH11K_SCAN_STARTING:
return "starting";
case ATH11K_SCAN_RUNNING:
return "running";
case ATH11K_SCAN_ABORTING:
return "aborting";
}
return "unknown";
}
static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb)
{
return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
static inline struct ath11k_skb_rxcb *ATH11K_SKB_RXCB(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb));
return (struct ath11k_skb_rxcb *)skb->cb;
}
static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
{
return (struct ath11k_vif *)vif->drv_priv;
}
#endif /* _CORE_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,281 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef _ATH11K_DEBUG_H_
#define _ATH11K_DEBUG_H_
#include "hal_tx.h"
#include "trace.h"
#define ATH11K_TX_POWER_MAX_VAL 70
#define ATH11K_TX_POWER_MIN_VAL 0
enum ath11k_debug_mask {
ATH11K_DBG_AHB = 0x00000001,
ATH11K_DBG_WMI = 0x00000002,
ATH11K_DBG_HTC = 0x00000004,
ATH11K_DBG_DP_HTT = 0x00000008,
ATH11K_DBG_MAC = 0x00000010,
ATH11K_DBG_BOOT = 0x00000020,
ATH11K_DBG_QMI = 0x00000040,
ATH11K_DBG_DATA = 0x00000080,
ATH11K_DBG_MGMT = 0x00000100,
ATH11K_DBG_REG = 0x00000200,
ATH11K_DBG_TESTMODE = 0x00000400,
ATH11k_DBG_HAL = 0x00000800,
ATH11K_DBG_ANY = 0xffffffff,
};
/* htt_dbg_ext_stats_type */
enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
/* keep this last */
ATH11K_DBG_HTT_NUM_EXT_STATS,
};
struct debug_htt_stats_req {
bool done;
u8 pdev_id;
u8 type;
u8 peer_addr[ETH_ALEN];
struct completion cmpln;
u32 buf_len;
u8 buf[0];
};
#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
#define ATH11K_HTT_PKTLOG_MAX_SIZE 2048
enum ath11k_pktlog_filter {
ATH11K_PKTLOG_RX = 0x000000001,
ATH11K_PKTLOG_TX = 0x000000002,
ATH11K_PKTLOG_RCFIND = 0x000000004,
ATH11K_PKTLOG_RCUPDATE = 0x000000008,
ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
ATH11K_PKTLOG_EVENT_SW = 0x000000040,
ATH11K_PKTLOG_ANY = 0x00000006f,
};
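/* Note: ATH11K_PKTLOG_ANY (0x6f) is the bitwise OR of all the filter
 * bits above (0x01 | 0x02 | 0x04 | 0x08 | 0x20 | 0x40).
 */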
enum ath11k_pktlog_mode {
ATH11K_PKTLOG_MODE_LITE = 1,
ATH11K_PKTLOG_MODE_FULL = 2,
};
enum ath11k_pktlog_enum {
ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
ATH11K_PKTLOG_TYPE_TX_STAT = 2,
ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
ATH11K_PKTLOG_TYPE_RX_STAT = 5,
ATH11K_PKTLOG_TYPE_RC_FIND = 6,
ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
ATH11K_PKTLOG_TYPE_RX_CBF = 10,
ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
ATH11K_PKTLOG_TYPE_LITE_RX = 24,
};
__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
extern unsigned int ath11k_debug_mask;
#ifdef CONFIG_ATH11K_DEBUG
__printf(3, 4) void __ath11k_dbg(struct ath11k_base *ab,
enum ath11k_debug_mask mask,
const char *fmt, ...);
void ath11k_dbg_dump(struct ath11k_base *ab,
enum ath11k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
#else /* CONFIG_ATH11K_DEBUG */
static inline int __ath11k_dbg(struct ath11k_base *ab,
enum ath11k_debug_mask dbg_mask,
const char *fmt, ...)
{
return 0;
}
static inline void ath11k_dbg_dump(struct ath11k_base *ab,
enum ath11k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
}
#endif /* CONFIG_ATH11K_DEBUG */
#ifdef CONFIG_ATH11K_DEBUGFS
int ath11k_debug_soc_create(struct ath11k_base *ab);
void ath11k_debug_soc_destroy(struct ath11k_base *ab);
int ath11k_debug_pdev_create(struct ath11k_base *ab);
void ath11k_debug_pdev_destroy(struct ath11k_base *ab);
int ath11k_debug_register(struct ath11k *ar);
void ath11k_debug_unregister(struct ath11k *ar);
void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
struct sk_buff *skb);
void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
void ath11k_debug_fw_stats_init(struct ath11k *ar);
int ath11k_dbg_htt_stats_req(struct ath11k *ar);
static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
}
static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
{
return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
}
static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
{
return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
}
static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
{
return ar->debug.extd_tx_stats;
}
static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
{
return ar->debug.extd_rx_stats;
}
#else
static inline int ath11k_debug_soc_create(struct ath11k_base *ab)
{
return 0;
}
static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab)
{
}
static inline int ath11k_debug_pdev_create(struct ath11k_base *ab)
{
return 0;
}
static inline void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
{
}
static inline int ath11k_debug_register(struct ath11k *ar)
{
return 0;
}
static inline void ath11k_debug_unregister(struct ath11k *ar)
{
}
static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
}
static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab,
struct sk_buff *skb)
{
}
static inline void ath11k_debug_fw_stats_init(struct ath11k *ar)
{
}
static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
{
return 0;
}
static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
{
return 0;
}
static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar)
{
return 0;
}
static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
return false;
}
static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
{
return false;
}
static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
{
return false;
}
#endif /* CONFIG_ATH11K_DEBUGFS */
#ifdef CONFIG_MAC80211_DEBUGFS
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,
u8 legacy_rate_idx);
void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts);
#else /* !CONFIG_MAC80211_DEBUGFS */
static inline void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,
u8 legacy_rate_idx)
{
}
static inline void
ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
}
#endif /* CONFIG_MAC80211_DEBUGFS*/
#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
do { \
if (ath11k_debug_mask & dbg_mask) \
__ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
} while (0)
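/* Example usage (illustrative): after "modprobe ath11k debug_mask=0x20"
 * only ATH11K_DBG_BOOT messages are emitted, e.g.
 *
 *	ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board api %d\n", api);
 */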
#endif /* _ATH11K_DEBUG_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,538 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "core.h"
#include "peer.h"
#include "debug.h"
void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,
u8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath11k_htt_tx_stats *tx_stats;
int gi, mcs, bw, nss;
if (!arsta->tx_stats)
return;
tx_stats = arsta->tx_stats;
gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
mcs = txrate->mcs;
bw = txrate->bw;
nss = txrate->nss - 1;
#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
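/* In each pair of rows below, index 0 accumulates bytes
 * (ATH11K_COUNTER_TYPE_BYTES) and index 1 packets
 * (ATH11K_COUNTER_TYPE_PKTS), matching the enum order in core.h.
 */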
if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
} else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
} else {
mcs = legacy_rate_idx;
STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
}
if (peer_stats->is_ampdu) {
tx_stats->ba_fails += peer_stats->ba_fails;
if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
STATS_OP_FMT(AMPDU).he[0][mcs] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).he[1][mcs] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(AMPDU).ht[0][mcs] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).ht[1][mcs] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
} else {
STATS_OP_FMT(AMPDU).vht[0][mcs] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).vht[1][mcs] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
}
STATS_OP_FMT(AMPDU).bw[0][bw] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).nss[0][nss] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
} else {
tx_stats->ack_fails += peer_stats->ba_fails;
}
STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
tx_stats->tx_duration += peer_stats->duration;
}
void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate;
u8 rate_idx = 0;
int ret;
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath11k_warn(ab, "failed to find the peer\n");
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
return;
}
sta = peer->sta;
arsta = (struct ath11k_sta *)sta->drv_priv;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
if (ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
ret = ath11k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
ts->pkt_type,
&rate_idx,
&rate);
if (ret < 0) {
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
return;
}
arsta->txrate.legacy = rate;
} else if (ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
if (ts->mcs > 7) {
ath11k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
return;
}
arsta->txrate.mcs = ts->mcs + 8 * (arsta->last_txrate.nss - 1);
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
if (ts->sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else if (ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
if (ts->mcs > 9) {
ath11k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
return;
}
arsta->txrate.mcs = ts->mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
if (ts->sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
/*TODO: update HE rates */
}
arsta->txrate.nss = arsta->last_txrate.nss;
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(ts->bw);
ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
}
static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct ath11k_htt_data_stats *stats;
static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail",
"retry", "ampdu"};
static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
int len = 0, i, j, k, retval = 0;
const int size = 2 * 4096;
char *buf;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) {
for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) {
stats = &arsta->tx_stats->stats[k];
len += scnprintf(buf + len, size - len, "%s_%s\n",
str_name[k],
str[j]);
len += scnprintf(buf + len, size - len,
" HE MCS %s\n",
str[j]);
for (i = 0; i < ATH11K_HE_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ",
stats->he[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" VHT MCS %s\n",
str[j]);
for (i = 0; i < ATH11K_VHT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ",
stats->vht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, " HT MCS %s\n",
str[j]);
for (i = 0; i < ATH11K_HT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ", stats->ht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" BW %s (20,40,80,160 MHz)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->bw[j][0], stats->bw[j][1],
stats->bw[j][2], stats->bw[j][3]);
len += scnprintf(buf + len, size - len,
" NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->nss[j][0], stats->nss[j][1],
stats->nss[j][2], stats->nss[j][3]);
len += scnprintf(buf + len, size - len,
" GI %s (0.4us,0.8us,1.6us,3.2us)\n",
str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->gi[j][0], stats->gi[j][1],
stats->gi[j][2], stats->gi[j][3]);
len += scnprintf(buf + len, size - len,
" legacy rate %s (1,2 ... Mbps)\n ",
str[j]);
for (i = 0; i < ATH11K_LEGACY_NUM; i++)
len += scnprintf(buf + len, size - len, "%llu ",
stats->legacy[j][i]);
len += scnprintf(buf + len, size - len, "\n");
}
}
len += scnprintf(buf + len, size - len,
"\nTX duration\n %llu usecs\n",
arsta->tx_stats->tx_duration);
len += scnprintf(buf + len, size - len,
"BA fails\n %llu\n", arsta->tx_stats->ba_fails);
len += scnprintf(buf + len, size - len,
"ack fails\n %llu\n", arsta->tx_stats->ack_fails);
spin_unlock_bh(&ar->data_lock);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return retval;
}
static const struct file_operations fops_tx_stats = {
.read = ath11k_dbg_sta_dump_tx_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
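/* Dump the accumulated rx stats for a station: MSDU/MPDU counters plus
* GI, bandwidth, coding, preamble and reception type histograms and the
* per-TID, per-MCS and per-NSS counts, all read under ab->base_lock.
*/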
static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
int len = 0, i, retval = 0;
const int size = 4096;
char *buf;
if (!rx_stats)
return -ENOENT;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->ab->base_lock);
len += scnprintf(buf + len, size - len, "RX peer stats:\n");
len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
rx_stats->num_msdu);
len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
rx_stats->tcp_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
rx_stats->udp_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
rx_stats->ampdu_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
rx_stats->non_ampdu_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
rx_stats->stbc_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
rx_stats->beamformed_count);
len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
rx_stats->num_mpdu_fcs_ok);
len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
rx_stats->num_mpdu_fcs_err);
len += scnprintf(buf + len, size - len,
"GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
rx_stats->gi_count[0], rx_stats->gi_count[1],
rx_stats->gi_count[2], rx_stats->gi_count[3]);
len += scnprintf(buf + len, size - len,
"BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n",
rx_stats->bw_count[0], rx_stats->bw_count[1],
rx_stats->bw_count[2], rx_stats->bw_count[3]);
len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n",
rx_stats->coding_count[0], rx_stats->coding_count[1]);
len += scnprintf(buf + len, size - len,
"preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n",
rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
rx_stats->pream_cnt[4]);
len += scnprintf(buf + len, size - len,
"reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
rx_stats->reception_type[0], rx_stats->reception_type[1],
rx_stats->reception_type[2], rx_stats->reception_type[3]);
len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):");
for (i = 0; i < HAL_RX_MAX_MCS + 1; i++)
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]);
len += scnprintf(buf + len, size - len, "\nNSS(1-8):");
for (i = 0; i < HAL_RX_MAX_NSS; i++)
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
rx_stats->rx_duration);
len += scnprintf(buf + len, size - len, "\n");
spin_unlock_bh(&ar->ab->base_lock);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return retval;
}
static const struct file_operations fops_rx_stats = {
.read = ath11k_dbg_sta_dump_rx_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
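/* Opening htt_peer_stats sends an HTT EXT_STATS_PEER_INFO request for
* this station's MAC address; the firmware response is buffered in
* stats_req and returned by the read handler. The request buffer is
* freed when the file is released.
*/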
static int
ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
{
struct ieee80211_sta *sta = inode->i_private;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct debug_htt_stats_req *stats_req;
int ret;
stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
if (!stats_req)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
ret = ath11k_dbg_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
if (ret < 0)
goto out;
file->private_data = stats_req;
return 0;
out:
vfree(stats_req);
return ret;
}
static int
ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
{
vfree(file->private_data);
return 0;
}
static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct debug_htt_stats_req *stats_req = file->private_data;
char *buf;
u32 length = 0;
buf = stats_req->buf;
length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
return simple_read_from_buffer(user_buf, count, ppos, buf, length);
}
static const struct file_operations fops_htt_peer_stats = {
.open = ath11k_dbg_sta_open_htt_peer_stats,
.release = ath11k_dbg_sta_release_htt_peer_stats,
.read = ath11k_dbg_sta_read_htt_peer_stats,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
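/* Writing a non-zero value to peer_pktlog enables the peer based packet
* log filter in firmware for this station's MAC address, writing zero
* disables it; the read handler reports the current valid flag and the
* filtered address.
*/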
static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
int ret, enable;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
ret = kstrtoint_from_user(buf, count, 0, &enable);
if (ret)
goto out;
ar->debug.pktlog_peer_valid = enable;
memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN);
/* Send peer based pktlog enable/disable */
ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
sta->addr, ret);
goto out;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n",
enable);
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[32] = {0};
int len;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf), "%08x %pM\n",
ar->debug.pktlog_peer_valid,
ar->debug.pktlog_peer_addr);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_peer_pktlog = {
.write = ath11k_dbg_sta_write_peer_pktlog,
.read = ath11k_dbg_sta_read_peer_pktlog,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
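/* Create the per-station debugfs entries: tx_stats and rx_stats are
* added only when extended tx/rx stats are enabled, while
* htt_peer_stats (read-only) and peer_pktlog (read-write) are always
* available.
*/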
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
struct ath11k *ar = hw->priv;
if (ath11k_debug_is_extd_tx_stats_enabled(ar))
debugfs_create_file("tx_stats", 0400, dir, sta,
&fops_tx_stats);
if (ath11k_debug_is_extd_rx_stats_enabled(ar))
debugfs_create_file("rx_stats", 0400, dir, sta,
&fops_rx_stats);
debugfs_create_file("htt_peer_stats", 0400, dir, sta,
&fops_htt_peer_stats);
debugfs_create_file("peer_pktlog", 0644, dir, sta,
&fops_peer_pktlog);
}

View File

@ -0,0 +1,903 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/kfifo.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
/* TODO: Any other peer specific DP cleanup */
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, addr);
if (!peer) {
ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
addr, vdev_id);
spin_unlock_bh(&ab->base_lock);
return;
}
ath11k_peer_rx_tid_cleanup(ar, peer);
spin_unlock_bh(&ab->base_lock);
}
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
struct ath11k_base *ab = ar->ab;
u32 reo_dest;
int ret;
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
reo_dest = ar->dp.mac_id + 1;
ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
WMI_PEER_SET_DEFAULT_ROUTING,
DP_RX_HASH_ENABLE | (reo_dest << 1));
if (ret) {
ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
ret, addr, vdev_id);
return ret;
}
ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
HAL_DESC_REO_NON_QOS_TID, 1, 0);
if (ret) {
ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d\n",
ret);
return ret;
}
ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0);
if (ret) {
ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d\n",
ret);
return ret;
}
/* TODO: Setup other peer specific resource used in data path */
return 0;
}
void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
if (!ring->vaddr_unaligned)
return;
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
ring->vaddr_unaligned = NULL;
}
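/* Allocate an SRNG and register it with HAL. The ring is over-allocated
* by HAL_RING_BASE_ALIGN - 1 bytes so the base address can be aligned
* with PTR_ALIGN() while the unaligned address is kept around for
* dma_free_coherent() in ath11k_dp_srng_cleanup().
*/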
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
struct hal_srng_params params = { 0 };
int entry_sz = ath11k_hal_srng_get_entrysize(type);
int max_entries = ath11k_hal_srng_get_max_entries(type);
int ret;
if (max_entries < 0 || entry_sz < 0)
return -EINVAL;
if (num_entries > max_entries)
num_entries = max_entries;
ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
if (!ring->vaddr_unaligned)
return -ENOMEM;
ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
(unsigned long)ring->vaddr_unaligned);
params.ring_base_vaddr = ring->vaddr;
params.ring_base_paddr = ring->paddr;
params.num_entries = num_entries;
switch (type) {
case HAL_REO_DST:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_RX;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_RXDMA_BUF:
case HAL_RXDMA_MONITOR_BUF:
case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_WBM2SW_RELEASE:
if (ring_num < 3) {
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_TX;
params.intr_timer_thres_us =
HAL_SRNG_INT_TIMER_THRESHOLD_TX;
break;
}
/* fall through when ring_num >= 3 */
/* fall through */
case HAL_REO_EXCEPTION:
case HAL_REO_REINJECT:
case HAL_REO_CMD:
case HAL_REO_STATUS:
case HAL_TCL_DATA:
case HAL_TCL_CMD:
case HAL_TCL_STATUS:
case HAL_WBM_IDLE_LINK:
case HAL_SW2WBM_RELEASE:
case HAL_RXDMA_DST:
case HAL_RXDMA_MONITOR_DST:
case HAL_RXDMA_MONITOR_DESC:
case HAL_RXDMA_DIR_BUF:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
break;
default:
ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
return -EINVAL;
}
ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
if (ret < 0) {
ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
ret, ring_num);
return ret;
}
ring->ring_id = ret;
return 0;
}
static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int i;
ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
}
ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}
static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
DP_WBM_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
DP_TCL_CMD_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
0, 0, DP_TCL_STATUS_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
goto err;
}
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
HAL_TCL_DATA, i, 0,
DP_TCL_DATA_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, i, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d\n",
i, ret);
goto err;
}
srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
ath11k_hal_tx_init_data_ring(ab, srng);
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
0, 0, DP_REO_REINJECT_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
3, 0, DP_RX_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
0, 0, DP_REO_EXCEPTION_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
0, 0, DP_REO_CMD_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
goto err;
}
srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
ath11k_hal_reo_init_cmd_ring(ab, srng);
ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
0, 0, DP_REO_STATUS_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
goto err;
}
ath11k_hal_reo_hw_setup(ab);
return 0;
err:
ath11k_dp_srng_common_cleanup(ab);
return ret;
}
static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
int i;
for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
if (!slist[i].vaddr)
continue;
dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
slist[i].vaddr, slist[i].paddr);
slist[i].vaddr = NULL;
}
}
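/* When the idle link descriptor list is too large for a single
* allocation, the link descriptor banks are advertised to HW through a
* scatter list: each scatter buffer is filled with a run of link
* descriptor addresses and the final write offset is passed to
* ath11k_hal_setup_link_idle_list().
*/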
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
int size,
u32 n_link_desc_bank,
u32 n_link_desc,
u32 last_bank_sz)
{
struct ath11k_dp *dp = &ab->dp;
struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
u32 n_entries_per_buf;
int num_scatter_buf, scatter_idx;
struct hal_wbm_link_desc *scatter_buf;
int align_bytes, n_entries;
dma_addr_t paddr;
int rem_entries;
int i;
int ret = 0;
u32 end_offset;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
return -EINVAL;
for (i = 0; i < num_scatter_buf; i++) {
slist[i].vaddr = dma_alloc_coherent(ab->dev,
HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
&slist[i].paddr, GFP_KERNEL);
if (!slist[i].vaddr) {
ret = -ENOMEM;
goto err;
}
}
scatter_idx = 0;
scatter_buf = slist[scatter_idx].vaddr;
rem_entries = n_entries_per_buf;
for (i = 0; i < n_link_desc_bank; i++) {
align_bytes = link_desc_banks[i].vaddr -
link_desc_banks[i].vaddr_unaligned;
n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
HAL_LINK_DESC_SIZE;
paddr = link_desc_banks[i].paddr;
while (n_entries) {
ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
rem_entries--;
scatter_buf++;
continue;
}
rem_entries = n_entries_per_buf;
scatter_idx++;
scatter_buf = slist[scatter_idx].vaddr;
}
}
end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
sizeof(struct hal_wbm_link_desc);
ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
n_link_desc, end_offset);
return 0;
err:
ath11k_dp_scatter_idle_link_desc_cleanup(ab);
return ret;
}
static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
struct dp_link_desc_bank *link_desc_banks)
{
int i;
for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
if (link_desc_banks[i].vaddr_unaligned) {
dma_free_coherent(ab->dev,
link_desc_banks[i].size,
link_desc_banks[i].vaddr_unaligned,
link_desc_banks[i].paddr_unaligned);
link_desc_banks[i].vaddr_unaligned = NULL;
}
}
}
static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
struct dp_link_desc_bank *desc_bank,
int n_link_desc_bank,
int last_bank_sz)
{
struct ath11k_dp *dp = &ab->dp;
int i;
int ret = 0;
int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
for (i = 0; i < n_link_desc_bank; i++) {
if (i == (n_link_desc_bank - 1) && last_bank_sz)
desc_sz = last_bank_sz;
desc_bank[i].vaddr_unaligned =
dma_alloc_coherent(ab->dev, desc_sz,
&desc_bank[i].paddr_unaligned,
GFP_KERNEL);
if (!desc_bank[i].vaddr_unaligned) {
ret = -ENOMEM;
goto err;
}
desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
HAL_LINK_DESC_ALIGN);
desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
((unsigned long)desc_bank[i].vaddr -
(unsigned long)desc_bank[i].vaddr_unaligned);
desc_bank[i].size = desc_sz;
}
return 0;
err:
ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
return ret;
}
void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
struct dp_link_desc_bank *desc_bank,
u32 ring_type, struct dp_srng *ring)
{
ath11k_dp_link_desc_bank_free(ab, desc_bank);
if (ring_type != HAL_RXDMA_MONITOR_DESC) {
ath11k_dp_srng_cleanup(ab, ring);
ath11k_dp_scatter_idle_link_desc_cleanup(ab);
}
}
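/* Size the WBM idle link ring from the worst-case descriptor demand:
* MPDU link and MPDU queue descriptors plus tx and rx MSDU link
* descriptors, with the total rounded up to the next power of two.
*/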
static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
struct ath11k_dp *dp = &ab->dp;
u32 n_mpdu_link_desc, n_mpdu_queue_desc;
u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
int ret = 0;
n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
HAL_NUM_MPDUS_PER_LINK_DESC;
n_mpdu_queue_desc = n_mpdu_link_desc /
HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
DP_AVG_MSDUS_PER_FLOW) /
HAL_NUM_TX_MSDUS_PER_LINK_DESC;
n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
DP_AVG_MSDUS_PER_MPDU) /
HAL_NUM_RX_MSDUS_PER_LINK_DESC;
*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
n_tx_msdu_link_desc + n_rx_msdu_link_desc;
if (*n_link_desc & (*n_link_desc - 1))
*n_link_desc = 1 << fls(*n_link_desc);
ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
return ret;
}
return ret;
}
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
struct dp_link_desc_bank *link_desc_banks,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc)
{
u32 tot_mem_sz;
u32 n_link_desc_bank, last_bank_sz;
u32 entry_sz, align_bytes, n_entries;
u32 paddr;
u32 *desc;
int i, ret;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
n_link_desc_bank = 1;
last_bank_sz = tot_mem_sz;
} else {
n_link_desc_bank = tot_mem_sz /
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
HAL_LINK_DESC_ALIGN);
last_bank_sz = tot_mem_sz %
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
HAL_LINK_DESC_ALIGN);
if (last_bank_sz)
n_link_desc_bank += 1;
}
if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
return -EINVAL;
ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
n_link_desc_bank, last_bank_sz);
if (ret)
return ret;
/* Setup link desc idle list for HW internal usage */
entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
tot_mem_sz = entry_sz * n_link_desc;
/* Setup scatter desc list when the total memory requirement is more */
if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
ring_type != HAL_RXDMA_MONITOR_DESC) {
ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
n_link_desc_bank,
n_link_desc,
last_bank_sz);
if (ret) {
ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
ret);
goto fail_desc_bank_free;
}
return 0;
}
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
for (i = 0; i < n_link_desc_bank; i++) {
align_bytes = link_desc_banks[i].vaddr -
link_desc_banks[i].vaddr_unaligned;
n_entries = (link_desc_banks[i].size - align_bytes) /
HAL_LINK_DESC_SIZE;
paddr = link_desc_banks[i].paddr;
while (n_entries &&
(desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
i, paddr);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return 0;
fail_desc_bank_free:
ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
return ret;
}
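/* NAPI poll for one ext irq group: handle tx completions first, then
* service the rx error, wbm release, per-pdev rx, monitor status, reo
* status and rxdma rings, decrementing the budget as work is done and
* returning early once it is exhausted.
*/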
int ath11k_dp_service_srng(struct ath11k_base *ab,
struct ath11k_ext_irq_grp *irq_grp,
int budget)
{
struct napi_struct *napi = &irq_grp->napi;
int grp_id = irq_grp->grp_id;
int work_done = 0;
int i = 0;
int tot_work_done = 0;
while (ath11k_tx_ring_mask[grp_id] >> i) {
if (ath11k_tx_ring_mask[grp_id] & BIT(i))
ath11k_dp_tx_completion_handler(ab, i);
i++;
}
if (ath11k_rx_err_ring_mask[grp_id]) {
work_done = ath11k_dp_process_rx_err(ab, napi, budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
work_done = ath11k_dp_rx_process_wbm_err(ab,
napi,
budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ath11k_rx_ring_mask[grp_id]) {
for (i = 0; i < ab->num_radios; i++) {
if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
work_done = ath11k_dp_process_rx(ab, i, napi,
&irq_grp->pending_q,
budget);
budget -= work_done;
tot_work_done += work_done;
}
if (budget <= 0)
goto done;
}
}
if (ath11k_rx_mon_status_ring_mask[grp_id]) {
for (i = 0; i < ab->num_radios; i++) {
if (ath11k_rx_mon_status_ring_mask[grp_id] & BIT(i)) {
work_done =
ath11k_dp_rx_process_mon_rings(ab,
i, napi,
budget);
budget -= work_done;
tot_work_done += work_done;
}
if (budget <= 0)
goto done;
}
}
if (ath11k_reo_status_ring_mask[grp_id])
ath11k_dp_process_reo_status(ab);
for (i = 0; i < ab->num_radios; i++) {
if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
budget -= work_done;
tot_work_done += work_done;
}
if (budget <= 0)
goto done;
if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
HAL_RX_BUF_RBM_SW3_BM,
GFP_ATOMIC);
}
}
/* TODO: Implement handler for other interrupts */
done:
return tot_work_done;
}
void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
struct ath11k *ar;
int i;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
ath11k_dp_rx_pdev_free(ab, i);
ath11k_debug_unregister(ar);
ath11k_dp_rx_pdev_mon_detach(ar);
}
}
void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev_dp *dp;
int i;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
dp = &ar->dp;
dp->mac_id = i;
idr_init(&dp->rx_refill_buf_ring.bufs_idr);
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
}
}
int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
struct ath11k *ar;
int ret;
int i;
/* TODO: Per-pdev rx ring, unlike tx ring which is mapped to different ACs */
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
ret = ath11k_dp_rx_pdev_alloc(ab, i);
if (ret) {
ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
i);
goto err;
}
ret = ath11k_dp_rx_pdev_mon_attach(ar);
if (ret) {
ath11k_warn(ab, "failed to initialize mon pdev %d\n",
i);
goto err;
}
}
return 0;
err:
ath11k_dp_pdev_free(ab);
return ret;
}
int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
struct ath11k_htc_svc_conn_req conn_req;
struct ath11k_htc_svc_conn_resp conn_resp;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
&conn_resp);
if (status)
return status;
dp->eid = conn_resp.eid;
return 0;
}
static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
/* For STA mode, enable the address search index;
* TCL uses the ast_hash value in the descriptor.
*/
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
break;
case WMI_VDEV_TYPE_MONITOR:
default:
return;
}
}
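/* Program the per-vdev TCL metadata (type, vdev id and pdev id) carried
* in every tx descriptor and select the HW address search mode for the
* vdev type.
*/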
void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
arvif->vdev_id) |
FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
ar->pdev->pdev_id);
/* set HTT extension valid bit to 0 by default */
arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
ath11k_dp_update_vdev_search(arvif);
}
static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
struct ath11k_base *ab = (struct ath11k_base *)ctx;
struct sk_buff *msdu = skb;
dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(msdu);
return 0;
}
void ath11k_dp_free(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int i;
ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
ath11k_dp_srng_common_cleanup(ab);
ath11k_dp_reo_cmd_list_cleanup(ab);
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
idr_for_each(&dp->tx_ring[i].txbuf_idr,
ath11k_dp_tx_pending_cleanup, ab);
idr_destroy(&dp->tx_ring[i].txbuf_idr);
spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
spin_lock_bh(&dp->tx_ring[i].tx_status_lock);
kfifo_free(&dp->tx_ring[i].tx_status_fifo);
spin_unlock_bh(&dp->tx_ring[i].tx_status_lock);
}
/* Deinit any SOC level resource */
}
int ath11k_dp_alloc(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng = NULL;
size_t size = 0;
u32 n_link_desc = 0;
int ret;
int i;
dp->ab = ab;
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
spin_lock_init(&dp->reo_cmd_lock);
ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
return ret;
}
srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, srng, n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
return ret;
}
ret = ath11k_dp_srng_common_setup(ab);
if (ret)
goto fail_link_desc_cleanup;
size = roundup_pow_of_two(DP_TX_COMP_RING_SIZE);
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
dp->tx_ring[i].tcl_data_ring_id = i;
spin_lock_init(&dp->tx_ring[i].tx_status_lock);
ret = kfifo_alloc(&dp->tx_ring[i].tx_status_fifo, size,
GFP_KERNEL);
if (ret)
goto fail_cmn_srng_cleanup;
}
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
ath11k_hal_tx_set_dscp_tid_map(ab, i);
/* Init any SOC level resource for DP */
return 0;
fail_cmn_srng_cleanup:
ath11k_dp_srng_common_cleanup(ab);
fail_link_desc_cleanup:
ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
return ret;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,86 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_DP_RX_H
#define ATH11K_DP_RX_H
#include "core.h"
#include "rx_desc.h"
#include "debug.h"
#define DP_RX_MPDU_ERR_FCS BIT(0)
#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
#define DP_RX_MPDU_ERR_AMSDU_ERR BIT(3)
#define DP_RX_MPDU_ERR_OVERFLOW BIT(4)
#define DP_RX_MPDU_ERR_MSDU_LEN BIT(5)
#define DP_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
enum dp_rx_decap_type {
DP_RX_DECAP_TYPE_RAW,
DP_RX_DECAP_TYPE_NATIVE_WIFI,
DP_RX_DECAP_TYPE_ETHERNET2_DIX,
DP_RX_DECAP_TYPE_8023,
};
struct ath11k_dp_amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
struct ath11k_dp_rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
struct ieee80211_ampdu_params *params);
int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
struct ieee80211_ampdu_params *params);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn);
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
struct sk_buff *skb);
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab);
int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int pdev_idx);
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int pdev_idx);
void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab);
void ath11k_dp_process_reo_status(struct ath11k_base *ab);
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget);
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
struct napi_struct *napi, int budget);
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
int budget);
int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, struct sk_buff_head *pending_q,
int budget);
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
enum hal_rx_buf_return_buf_manager mgr,
gfp_t gfp);
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
const void *ptr, void *data),
void *data);
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget);
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget);
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
enum hal_rx_buf_return_buf_manager mgr,
gfp_t gfp);
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
#endif /* ATH11K_DP_RX_H */

View File

@ -0,0 +1,936 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
/* NOTE: None of the mapped ring id values may exceed DP_TCL_NUM_RING_MAX */
static const u8
ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
/* TODO: Determine encap type based on vif_type and configuration */
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
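/* Native-wifi encap requires the QoS control field to be stripped from
* the 802.11 header: shift the preceding header bytes forward by
* IEEE80211_QOS_CTL_LEN, pull the skb and clear the QoS-data subtype
* bit in frame_control.
*/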
static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u8 *qos_ctl;
if (!ieee80211_is_data_qos(hdr->frame_control))
return;
qos_ctl = ieee80211_get_qos_ctl(hdr);
memmove(skb->data + IEEE80211_QOS_CTL_LEN,
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
hdr = (void *)skb->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
if (!ieee80211_is_data_qos(hdr->frame_control))
return HAL_DESC_REO_NON_QOS_TID;
else
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
static enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return HAL_ENCRYPT_TYPE_WEP_40;
case WLAN_CIPHER_SUITE_WEP104:
return HAL_ENCRYPT_TYPE_WEP_104;
case WLAN_CIPHER_SUITE_TKIP:
return HAL_ENCRYPT_TYPE_TKIP_MIC;
case WLAN_CIPHER_SUITE_CCMP:
return HAL_ENCRYPT_TYPE_CCMP_128;
case WLAN_CIPHER_SUITE_CCMP_256:
return HAL_ENCRYPT_TYPE_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return HAL_ENCRYPT_TYPE_GCMP_128;
case WLAN_CIPHER_SUITE_GCMP_256:
return HAL_ENCRYPT_TYPE_AES_GCMP_256;
default:
return HAL_ENCRYPT_TYPE_OPEN;
}
}
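/* Data tx entry point: allocate an msdu id from the per-ring idr, build
* a hal_tx_info (encap and encrypt type, search flags, checksum offload
* flags and tid), DMA map the skb and enqueue a TCL buffer descriptor
* on the selected data ring. The idr entry and DMA mapping are rolled
* back on failure.
*/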
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct sk_buff *skb)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
struct hal_tx_info ti = {0};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct dp_tx_ring *tx_ring;
u8 cached_desc[HAL_TCL_DESC_LEN];
void *hal_tcl_desc;
u8 pool_id;
u8 hal_ring_id;
int ret;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
if (!ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
tx_ring = &dp->tx_ring[ti.ring_id];
spin_lock_bh(&tx_ring->tx_idr_lock);
ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (ret < 0)
return -ENOSPC;
ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
ti.meta_data_flags = arvif->tcl_metadata;
if (info->control.hw_key)
ti.encrypt_type =
ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher);
else
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
ti.bss_ast_hash = arvif->ast_hash;
ti.dscp_tid_tbl_idx = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
}
if (ieee80211_vif_is_mesh(arvif->vif))
ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1);
ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
ti.tid = ath11k_dp_tx_get_tid(skb);
switch (ti.encap_type) {
case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
ath11k_dp_tx_encap_nwifi(skb);
break;
case HAL_TCL_ENCAP_TYPE_RAW:
/* TODO: for CHECKSUM_PARTIAL case in raw mode, HW checksum offload
* is not applicable, hence manual checksum calculation using
* skb_checksum_help() is needed
*/
case HAL_TCL_ENCAP_TYPE_ETHERNET:
case HAL_TCL_ENCAP_TYPE_802_3:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
goto fail_remove_idr;
}
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_idr;
}
ti.data_len = skb->len;
skb_cb->paddr = ti.paddr;
skb_cb->vif = arvif->vif;
skb_cb->ar = ar;
memset(cached_desc, 0, HAL_TCL_DESC_LEN);
ath11k_hal_tx_cmd_desc_setup(ab, cached_desc, &ti);
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tcl_ring = &ab->hal.srng_list[hal_ring_id];
spin_lock_bh(&tcl_ring->lock);
ath11k_hal_srng_access_begin(ab, tcl_ring);
hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
if (!hal_tcl_desc) {
/* NOTE: It is highly unlikely we'll be running out of tcl_ring
* desc because the desc is directly enqueued onto hw queue.
* So add tx packet throttling logic in future if required.
*/
ath11k_hal_srng_access_end(ab, tcl_ring);
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
goto fail_unmap_dma;
}
ath11k_hal_tx_desc_sync(cached_desc, hal_tcl_desc);
ath11k_hal_srng_access_end(ab, tcl_ring);
spin_unlock_bh(&tcl_ring->lock);
atomic_inc(&ar->dp.num_tx_pending);
return 0;
fail_unmap_dma:
dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
fail_remove_idr:
spin_lock_bh(&tx_ring->tx_idr_lock);
idr_remove(&tx_ring->txbuf_idr,
FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
spin_unlock_bh(&tx_ring->tx_idr_lock);
return ret;
}
static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
int msdu_id,
struct dp_tx_ring *tx_ring)
{
struct ath11k *ar;
struct sk_buff *msdu;
struct ath11k_skb_cb *skb_cb;
spin_lock_bh(&tx_ring->tx_idr_lock);
msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
if (!msdu) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
dev_kfree_skb_any(msdu);
ar = ab->pdevs[mac_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
static void
ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct dp_tx_ring *tx_ring,
struct ath11k_dp_htt_wbm_tx_status *ts)
{
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
struct ath11k *ar;
spin_lock_bh(&tx_ring->tx_idr_lock);
msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
if (!msdu) {
ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
ts->msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
ar = skb_cb->ar;
idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
memset(&info->status, 0, sizeof(info->status));
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.is_valid_ack_signal = true;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
}
ieee80211_tx_status(ar->hw, msdu);
}
static void
ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
void *desc, u8 mac_id,
u32 msdu_id, struct dp_tx_ring *tx_ring)
{
struct htt_tx_wbm_completion *status_desc;
struct ath11k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
status_desc->info0);
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.msdu_id = msdu_id;
ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
status_desc->info1);
ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
/* This event is to be handled only when the driver decides to
* use WDS offload functionality.
*/
break;
default:
ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
break;
}
}
static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
if (ts->try_cnt > 1) {
peer_stats->retry_pkts += ts->try_cnt - 1;
peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
peer_stats->failed_pkts += 1;
peer_stats->failed_bytes += msdu->len;
}
}
}
static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
dev_kfree_skb_any(msdu);
goto exit;
}
if (!skb_cb->vif) {
dev_kfree_skb_any(msdu);
goto exit;
}
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
/* skip tx rate update from ieee80211_status */
info->status.rates[0].idx = -1;
if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.is_valid_ack_signal = true;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
if (ar->last_ppdu_id == 0) {
ar->last_ppdu_id = ts->ppdu_id;
} else if (ar->last_ppdu_id == ts->ppdu_id ||
ar->cached_ppdu_id == ar->last_ppdu_id) {
ar->cached_ppdu_id = ar->last_ppdu_id;
ar->cached_stats.is_ampdu = true;
ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
} else {
ar->cached_stats.is_ampdu = false;
ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
}
ar->last_ppdu_id = ts->ppdu_id;
}
ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
}
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
* Might end up reporting it out-of-band from HTT stats.
*/
ieee80211_tx_status(ar->hw, msdu);
exit:
rcu_read_unlock();
}
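/* Tx completion handler: drain the WBM status ring into a kfifo while
* holding the ring lock, then process the buffered statuses, routing
* firmware (HTT) completions and TQM completions to their respective
* handlers.
*/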
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
{
struct ath11k *ar;
struct ath11k_dp *dp = &ab->dp;
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct sk_buff *msdu;
struct hal_wbm_release_ring tx_status;
struct hal_tx_status ts;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
u32 *desc;
u32 msdu_id;
u8 mac_id;
spin_lock_bh(&status_ring->lock);
ath11k_hal_srng_access_begin(ab, status_ring);
spin_lock_bh(&tx_ring->tx_status_lock);
while (!kfifo_is_full(&tx_ring->tx_status_fifo) &&
(desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
ath11k_hal_tx_status_desc_sync((void *)desc,
(void *)&tx_status);
kfifo_put(&tx_ring->tx_status_fifo, tx_status);
}
if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
kfifo_is_full(&tx_ring->tx_status_fifo)) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
spin_unlock_bh(&tx_ring->tx_status_lock);
ath11k_hal_srng_access_end(ab, status_ring);
spin_unlock_bh(&status_ring->lock);
spin_lock_bh(&tx_ring->tx_status_lock);
while (kfifo_get(&tx_ring->tx_status_fifo, &tx_status)) {
memset(&ts, 0, sizeof(ts));
ath11k_hal_tx_status_parse(ab, &tx_status, &ts);
mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, ts.desc_id);
msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ts.desc_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
ath11k_dp_tx_process_htt_tx_complete(ab,
(void *)&tx_status,
mac_id, msdu_id,
tx_ring);
continue;
}
spin_lock_bh(&tx_ring->tx_idr_lock);
msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
if (!msdu) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
continue;
}
idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock_bh(&tx_ring->tx_idr_lock);
ar = ab->pdevs[mac_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
/* TODO: Locking optimization so that tx_completion for an msdu
* is not called with tx_status_lock acquired
*/
ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
}
spin_unlock_bh(&tx_ring->tx_status_lock);
}
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
enum hal_reo_cmd_type type,
struct ath11k_hal_reo_cmd *cmd,
void (*cb)(struct ath11k_dp *, void *,
enum hal_reo_cmd_status))
{
struct ath11k_dp *dp = &ab->dp;
struct dp_reo_cmd *dp_cmd;
struct hal_srng *cmd_ring;
int cmd_num;
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
/* reo cmd ring descriptors have cmd_num starting from 1 */
if (cmd_num <= 0)
return -EINVAL;
if (!cb)
return 0;
/* Can this be optimized so that we keep the pending command list only
* for tid delete command to free up the resource on the command status
* indication?
*/
dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
if (!dp_cmd)
return -ENOMEM;
memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
dp_cmd->cmd_num = cmd_num;
dp_cmd->handler = cb;
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
spin_unlock_bh(&dp->reo_cmd_lock);
return 0;
}
static int
ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
int mac_id, u32 ring_id,
enum hal_ring_type ring_type,
enum htt_srng_ring_type *htt_ring_type,
enum htt_srng_ring_id *htt_ring_id)
{
int lmac_ring_id_offset = 0;
int ret = 0;
switch (ring_type) {
case HAL_RXDMA_BUF:
lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
lmac_ring_id_offset) ||
ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
lmac_ring_id_offset))) {
ret = -EINVAL;
}
*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_DST:
*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_BUF:
*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_STATUS:
*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_DST:
*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_DESC:
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
default:
ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
}
return ret;
}
int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type)
{
struct htt_srng_setup_cmd *cmd;
struct hal_srng *srng = &ab->hal.srng_list[ring_id];
struct hal_srng_params params;
struct sk_buff *skb;
u32 ring_entry_sz;
int len = sizeof(*cmd);
dma_addr_t hp_addr, tp_addr;
enum htt_srng_ring_type htt_ring_type;
enum htt_srng_ring_id htt_ring_id;
int ret = 0;
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memset(&params, 0, sizeof(params));
ath11k_hal_srng_get_params(ab, srng, &params);
hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
ring_type, &htt_ring_type,
&htt_ring_id);
if (ret)
goto err_free;
skb_put(skb, len);
cmd = (struct htt_srng_setup_cmd *)skb->data;
cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
HTT_H2T_MSG_TYPE_SRING_SETUP);
if (htt_ring_type == HTT_SW_TO_HW_RING ||
htt_ring_type == HTT_HW_TO_SW_RING)
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
DP_SW2HW_MACID(mac_id));
else
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
mac_id);
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
htt_ring_type);
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
cmd->ring_base_addr_lo = params.ring_base_paddr &
HAL_ADDR_LSB_REG_MASK;
cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT;
ret = ath11k_hal_srng_get_entrysize(ring_type);
if (ret < 0) {
ret = -EINVAL;
goto err_free;
}
ring_entry_sz = ret;
ring_entry_sz >>= 2;
cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
ring_entry_sz);
cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
params.num_entries * ring_entry_sz);
cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
cmd->info1 |= FIELD_PREP(
HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
cmd->info1 |= FIELD_PREP(
HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
if (htt_ring_type == HTT_SW_TO_HW_RING)
cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
HAL_ADDR_MSB_REG_SHIFT;
cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
HAL_ADDR_MSB_REG_SHIFT;
cmd->ring_msi_addr_lo = 0;
cmd->ring_msi_addr_hi = 0;
cmd->msi_data = 0;
cmd->intr_info = FIELD_PREP(
HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
params.intr_batch_cntr_thres_entries * ring_entry_sz);
cmd->intr_info |= FIELD_PREP(
HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
params.intr_timer_thres_us >> 3);
cmd->info2 = 0;
if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
cmd->info2 = FIELD_PREP(
HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
params.low_threshold);
}
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
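/* HTT version handshake: send a VERSION_REQ on the HTT endpoint and
* wait up to HTT_TARGET_VERSION_TIMEOUT_HZ for the target to report a
* major version matching HTT_TARGET_VERSION_MAJOR.
*/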
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ver_req_cmd *cmd;
int len = sizeof(*cmd);
int ret;
init_completion(&dp->htt_tgt_version_received);
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
HTT_H2T_MSG_TYPE_VERSION_REQ);
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (ret == 0) {
ath11k_warn(ab, "htt target version request timed out\n");
return -ETIMEDOUT;
}
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
return -ENOTSUPP;
}
return 0;
}
int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ppdu_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
u8 pdev_mask;
int ret;
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
pdev_mask = 1 << (ar->pdev_idx);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int rx_buf_size,
struct htt_rx_ring_tlv_filter *tlv_filter)
{
struct htt_rx_ring_selection_cfg_cmd *cmd;
struct hal_srng *srng = &ab->hal.srng_list[ring_id];
struct hal_srng_params params;
struct sk_buff *skb;
int len = sizeof(*cmd);
enum htt_srng_ring_type htt_ring_type;
enum htt_srng_ring_id htt_ring_id;
int ret = 0;
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memset(&params, 0, sizeof(params));
ath11k_hal_srng_get_params(ab, srng, &params);
ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
ring_type, &htt_ring_type,
&htt_ring_id);
if (ret)
goto err_free;
skb_put(skb, len);
cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
if (htt_ring_type == HTT_SW_TO_HW_RING ||
htt_ring_type == HTT_HW_TO_SW_RING)
cmd->info0 |=
FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
DP_SW2HW_MACID(mac_id));
else
cmd->info0 |=
FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
mac_id);
cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
htt_ring_id);
cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
rx_buf_size);
cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
cmd->rx_filter_tlv = tlv_filter->rx_filter;
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
int
ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
struct htt_ext_stats_cfg_params *cfg_params,
u64 cookie)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ext_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
int ret;
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cfg_params->cfg0;
cmd->cfg_param1 = cfg_params->cfg1;
cmd->cfg_param2 = cfg_params->cfg2;
cmd->cfg_param3 = cfg_params->cfg3;
cmd->cookie_lsb = lower_32_bits(cookie);
cmd->cookie_msb = upper_32_bits(cookie);
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
ath11k_warn(ab, "failed to send htt type stats request: %d",
ret);
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
int ret = 0, ring_id = 0;
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
if (!reset) {
tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
tlv_filter.pkt_filter_flags0 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
tlv_filter.pkt_filter_flags1 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
tlv_filter.pkt_filter_flags2 =
HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
tlv_filter.pkt_filter_flags3 =
HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
}
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
HAL_RXDMA_MONITOR_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
if (ret)
return ret;
ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
if (!reset)
tlv_filter.rx_filter =
HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
else
tlv_filter = ath11k_mac_mon_status_filter_default;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
HAL_RXDMA_MONITOR_STATUS,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
return ret;
}
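
Every HTT host-to-target helper in this file follows the same shape: allocate an HTC skb sized for the command, fill the command in place, hand it to ath11k_htc_send(), and free the skb only on failure, since on success ownership passes to HTC. A minimal sketch of the pattern (the function name and one-word command layout are illustrative, not part of the driver):

	static int example_htt_send_cmd(struct ath11k_base *ab, u32 word)
	{
		struct ath11k_dp *dp = &ab->dp;
		struct sk_buff *skb;
		u32 *cmd;
		int ret;

		skb = ath11k_htc_alloc_skb(ab, sizeof(*cmd));
		if (!skb)
			return -ENOMEM;

		skb_put(skb, sizeof(*cmd));
		cmd = (u32 *)skb->data;
		*cmd = word;	/* a real caller fills its command struct here */

		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			/* send failed, so the skb is still ours to free */
			dev_kfree_skb_any(skb);
			return ret;
		}

		return 0;	/* success: HTC owns the skb now */
	}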

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_DP_TX_H
#define ATH11K_DP_TX_H
#include "core.h"
#include "hal_tx.h"
struct ath11k_dp_htt_wbm_tx_status {
u32 msdu_id;
bool acked;
int ack_rssi;
};
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct sk_buff *skb);
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
enum hal_reo_cmd_type type,
struct ath11k_hal_reo_cmd *cmd,
void (*func)(struct ath11k_dp *, void *,
enum hal_reo_cmd_status));
int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask);
int
ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
struct htt_ext_stats_cfg_params *cfg_params,
u64 cookie);
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset);
int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int rx_buf_size,
struct htt_rx_ring_tlv_filter *tlv_filter);
#endif
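
The 64-bit cookie taken by ath11k_dp_tx_htt_h2t_ext_stats_req() is split into the two 32-bit halves of the command, letting a caller round-trip an identifier of its choice through firmware. A hypothetical invocation (the stats type and cookie values are illustrative only):

	struct htt_ext_stats_cfg_params cfg = { };
	int ret;

	/* firmware echoes the cookie back in its response */
	ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, 1, &cfg, 0x123400005678ULL);
	if (ret)
		ath11k_warn(ar->ab, "ext stats request failed: %d\n", ret);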

File diff suppressed because it is too large

View File

@ -0,0 +1,897 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_HAL_H
#define ATH11K_HAL_H
#include "hal_desc.h"
#include "rx_desc.h"
struct ath11k_base;
#define HAL_LINK_DESC_SIZE (32 << 2)
#define HAL_LINK_DESC_ALIGN 128
#define HAL_NUM_MPDUS_PER_LINK_DESC 6
#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
#define HAL_MAX_AVAIL_BLK_RES 3
#define HAL_RING_BASE_ALIGN 8
#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
/* TODO: Check with hw team on the supported scatter buf size */
#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 48
#define HAL_DSCP_TID_TBL_SIZE 24
/* calculate the register address from bar0 of shadow register x */
#define SHADOW_BASE_ADDRESS 0x00003024
#define SHADOW_NUM_REGISTERS 36
/* WCSS Relative address */
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x00a00000
#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x00a01000
#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x00a02000
#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x00a03000
#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
/* SW2TCL(x) R0 ring configuration address */
#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
#define HAL_TCL1_RING_BASE_LSB 0x00000510
#define HAL_TCL1_RING_BASE_MSB 0x00000514
#define HAL_TCL1_RING_ID 0x00000518
#define HAL_TCL1_RING_MISC 0x00000520
#define HAL_TCL1_RING_TP_ADDR_LSB 0x0000052c
#define HAL_TCL1_RING_TP_ADDR_MSB 0x00000530
#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 0x00000540
#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 0x00000544
#define HAL_TCL1_RING_MSI1_BASE_LSB 0x00000558
#define HAL_TCL1_RING_MSI1_BASE_MSB 0x0000055c
#define HAL_TCL1_RING_MSI1_DATA 0x00000560
#define HAL_TCL2_RING_BASE_LSB 0x00000568
#define HAL_TCL_RING_BASE_LSB 0x00000618
#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET \
(HAL_TCL1_RING_MSI1_BASE_LSB - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET \
(HAL_TCL1_RING_MSI1_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_MSI1_DATA_OFFSET \
(HAL_TCL1_RING_MSI1_DATA - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_BASE_MSB_OFFSET \
(HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_ID_OFFSET \
(HAL_TCL1_RING_ID - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET \
(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET \
(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET \
(HAL_TCL1_RING_TP_ADDR_LSB - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET \
(HAL_TCL1_RING_TP_ADDR_MSB - HAL_TCL1_RING_BASE_LSB)
#define HAL_TCL1_RING_MISC_OFFSET \
(HAL_TCL1_RING_MISC - HAL_TCL1_RING_BASE_LSB)
/* SW2TCL(x) R2 ring pointers (head/tail) address */
#define HAL_TCL1_RING_HP 0x00002000
#define HAL_TCL1_RING_TP 0x00002004
#define HAL_TCL2_RING_HP 0x00002008
#define HAL_TCL_RING_HP 0x00002018
#define HAL_TCL1_RING_TP_OFFSET \
(HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
/* TCL STATUS ring address */
#define HAL_TCL_STATUS_RING_BASE_LSB 0x00000720
#define HAL_TCL_STATUS_RING_HP 0x00002030
/* REO2SW(x) R0 ring configuration address */
#define HAL_REO1_GEN_ENABLE 0x00000000
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
#define HAL_REO1_RING_BASE_LSB 0x0000029c
#define HAL_REO1_RING_BASE_MSB 0x000002a0
#define HAL_REO1_RING_ID 0x000002a4
#define HAL_REO1_RING_MISC 0x000002ac
#define HAL_REO1_RING_HP_ADDR_LSB 0x000002b0
#define HAL_REO1_RING_HP_ADDR_MSB 0x000002b4
#define HAL_REO1_RING_PRODUCER_INT_SETUP 0x000002c0
#define HAL_REO1_RING_MSI1_BASE_LSB 0x000002e4
#define HAL_REO1_RING_MSI1_BASE_MSB 0x000002e8
#define HAL_REO1_RING_MSI1_DATA 0x000002ec
#define HAL_REO2_RING_BASE_LSB 0x000002f4
#define HAL_REO1_AGING_THRESH_IX_0 0x00000564
#define HAL_REO1_AGING_THRESH_IX_1 0x00000568
#define HAL_REO1_AGING_THRESH_IX_2 0x0000056c
#define HAL_REO1_AGING_THRESH_IX_3 0x00000570
#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET \
(HAL_REO1_RING_MSI1_BASE_LSB - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET \
(HAL_REO1_RING_MSI1_BASE_MSB - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_MSI1_DATA_OFFSET \
(HAL_REO1_RING_MSI1_DATA - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_BASE_MSB_OFFSET \
(HAL_REO1_RING_BASE_MSB - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_ID_OFFSET (HAL_REO1_RING_ID - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET \
(HAL_REO1_RING_PRODUCER_INT_SETUP - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET \
(HAL_REO1_RING_HP_ADDR_LSB - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET \
(HAL_REO1_RING_HP_ADDR_MSB - HAL_REO1_RING_BASE_LSB)
#define HAL_REO1_RING_MISC_OFFSET (HAL_REO1_RING_MISC - HAL_REO1_RING_BASE_LSB)
/* REO2SW(x) R2 ring pointers (head/tail) address */
#define HAL_REO1_RING_HP 0x00003038
#define HAL_REO1_RING_TP 0x0000303c
#define HAL_REO2_RING_HP 0x00003040
#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
/* REO2TCL R0 ring configuration address */
#define HAL_REO_TCL_RING_BASE_LSB 0x000003fc
/* REO2TCL R2 ring pointer (head/tail) address */
#define HAL_REO_TCL_RING_HP 0x00003058
/* REO CMD R0 address */
#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
/* REO CMD R2 address */
#define HAL_REO_CMD_HP 0x00003020
/* SW2REO R0 address */
#define HAL_SW2REO_RING_BASE_LSB 0x000001ec
/* SW2REO R2 address */
#define HAL_SW2REO_RING_HP 0x00003028
/* CE ring R0 address */
#define HAL_CE_DST_RING_BASE_LSB 0x00000000
#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
#define HAL_CE_DST_RING_CTRL 0x000000b0
/* CE ring R2 address */
#define HAL_CE_DST_RING_HP 0x00000400
#define HAL_CE_DST_STATUS_RING_HP 0x00000408
/* REO status address */
#define HAL_REO_STATUS_RING_BASE_LSB 0x00000504
#define HAL_REO_STATUS_HP 0x00003070
/* WBM Idle R0 address */
#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860
#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR 0x00000870
#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
#define HAL_WBM_SCATTERED_RING_BASE_MSB 0x0000005c
#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068
#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c
#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078
#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c
#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR 0x00000084
/* WBM Idle R2 address */
#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
/* SW2WBM R0 release address */
#define HAL_WBM_RELEASE_RING_BASE_LSB 0x000001d8
/* SW2WBM R2 release address */
#define HAL_WBM_RELEASE_RING_HP 0x00003018
/* WBM2SW R0 release address */
#define HAL_WBM0_RELEASE_RING_BASE_LSB 0x00000910
#define HAL_WBM1_RELEASE_RING_BASE_LSB 0x00000968
/* WBM2SW R2 release address */
#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
/* TCL ring field mask and offset */
#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(17)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
/* REO ring field mask and offset */
#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
/* CE ring bit field mask and shift */
#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
#define HAL_ADDR_LSB_REG_MASK 0xffffffff
#define HAL_ADDR_MSB_REG_SHIFT 32
/* WBM ring bit field mask and shift */
#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
#define BASE_ADDR_MATCH_TAG_VAL 0x5
#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
#define HAL_RX_DESC_SIZE (sizeof(struct hal_rx_desc))
/* Add any other errors here and return them in
* ath11k_hal_rx_desc_get_err().
*/
enum hal_srng_ring_id {
HAL_SRNG_RING_ID_REO2SW1 = 0,
HAL_SRNG_RING_ID_REO2SW2,
HAL_SRNG_RING_ID_REO2SW3,
HAL_SRNG_RING_ID_REO2SW4,
HAL_SRNG_RING_ID_REO2TCL,
HAL_SRNG_RING_ID_SW2REO,
HAL_SRNG_RING_ID_REO_CMD = 8,
HAL_SRNG_RING_ID_REO_STATUS,
HAL_SRNG_RING_ID_SW2TCL1 = 16,
HAL_SRNG_RING_ID_SW2TCL2,
HAL_SRNG_RING_ID_SW2TCL3,
HAL_SRNG_RING_ID_SW2TCL4,
HAL_SRNG_RING_ID_SW2TCL_CMD = 24,
HAL_SRNG_RING_ID_TCL_STATUS,
HAL_SRNG_RING_ID_CE0_SRC = 32,
HAL_SRNG_RING_ID_CE1_SRC,
HAL_SRNG_RING_ID_CE2_SRC,
HAL_SRNG_RING_ID_CE3_SRC,
HAL_SRNG_RING_ID_CE4_SRC,
HAL_SRNG_RING_ID_CE5_SRC,
HAL_SRNG_RING_ID_CE6_SRC,
HAL_SRNG_RING_ID_CE7_SRC,
HAL_SRNG_RING_ID_CE8_SRC,
HAL_SRNG_RING_ID_CE9_SRC,
HAL_SRNG_RING_ID_CE10_SRC,
HAL_SRNG_RING_ID_CE11_SRC,
HAL_SRNG_RING_ID_CE0_DST = 56,
HAL_SRNG_RING_ID_CE1_DST,
HAL_SRNG_RING_ID_CE2_DST,
HAL_SRNG_RING_ID_CE3_DST,
HAL_SRNG_RING_ID_CE4_DST,
HAL_SRNG_RING_ID_CE5_DST,
HAL_SRNG_RING_ID_CE6_DST,
HAL_SRNG_RING_ID_CE7_DST,
HAL_SRNG_RING_ID_CE8_DST,
HAL_SRNG_RING_ID_CE9_DST,
HAL_SRNG_RING_ID_CE10_DST,
HAL_SRNG_RING_ID_CE11_DST,
HAL_SRNG_RING_ID_CE0_DST_STATUS = 80,
HAL_SRNG_RING_ID_CE1_DST_STATUS,
HAL_SRNG_RING_ID_CE2_DST_STATUS,
HAL_SRNG_RING_ID_CE3_DST_STATUS,
HAL_SRNG_RING_ID_CE4_DST_STATUS,
HAL_SRNG_RING_ID_CE5_DST_STATUS,
HAL_SRNG_RING_ID_CE6_DST_STATUS,
HAL_SRNG_RING_ID_CE7_DST_STATUS,
HAL_SRNG_RING_ID_CE8_DST_STATUS,
HAL_SRNG_RING_ID_CE9_DST_STATUS,
HAL_SRNG_RING_ID_CE10_DST_STATUS,
HAL_SRNG_RING_ID_CE11_DST_STATUS,
HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104,
HAL_SRNG_RING_ID_WBM_SW_RELEASE,
HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
HAL_SRNG_RING_ID_UMAC_ID_END = 127,
HAL_SRNG_RING_ID_LMAC1_ID_START,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
HAL_SRNG_RING_ID_LMAC1_ID_END = 143
};
/* SRNG registers are split into two groups R0 and R2 */
#define HAL_SRNG_REG_GRP_R0 0
#define HAL_SRNG_REG_GRP_R2 1
#define HAL_SRNG_NUM_REG_GRP 2
#define HAL_SRNG_NUM_LMACS 3
#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_RING_ID_REO2SW1
#define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \
HAL_SRNG_RING_ID_LMAC1_ID_START)
#define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC)
#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_UMAC_ID_END + \
HAL_SRNG_NUM_LMAC_RINGS)
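/* With LMAC1 ring IDs spanning 128..143 this works out to 15 rings per LMAC,
 * 45 rings across the 3 LMACs, and HAL_SRNG_RING_ID_MAX = 127 + 45 = 172.
 */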
enum hal_ring_type {
HAL_REO_DST,
HAL_REO_EXCEPTION,
HAL_REO_REINJECT,
HAL_REO_CMD,
HAL_REO_STATUS,
HAL_TCL_DATA,
HAL_TCL_CMD,
HAL_TCL_STATUS,
HAL_CE_SRC,
HAL_CE_DST,
HAL_CE_DST_STATUS,
HAL_WBM_IDLE_LINK,
HAL_SW2WBM_RELEASE,
HAL_WBM2SW_RELEASE,
HAL_RXDMA_BUF,
HAL_RXDMA_DST,
HAL_RXDMA_MONITOR_BUF,
HAL_RXDMA_MONITOR_STATUS,
HAL_RXDMA_MONITOR_DST,
HAL_RXDMA_MONITOR_DESC,
HAL_RXDMA_DIR_BUF,
HAL_MAX_RING_TYPES,
};
#define HAL_RX_MAX_BA_WINDOW 256
#define HAL_DEFAULT_REO_TIMEOUT_USEC (40 * 1000)
/**
* enum hal_reo_cmd_type: Enum for REO command type
* @CMD_GET_QUEUE_STATS: Get REO queue status/stats
* @CMD_FLUSH_QUEUE: Flush all frames in REO queue
* @CMD_FLUSH_CACHE: Flush descriptor entries in the cache
* @CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
* earlier with a 'REO_FLUSH_CACHE' command
* @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
* @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings
*/
enum hal_reo_cmd_type {
HAL_REO_CMD_GET_QUEUE_STATS = 0,
HAL_REO_CMD_FLUSH_QUEUE = 1,
HAL_REO_CMD_FLUSH_CACHE = 2,
HAL_REO_CMD_UNBLOCK_CACHE = 3,
HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
};
/**
* enum hal_reo_cmd_status: Enum for execution status of REO command
* @HAL_REO_CMD_SUCCESS: Command has successfully executed
* @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
* or cache was blocked
* @HAL_REO_CMD_FAILED: Command execution failed, could be due to
* invalid queue desc
* @HAL_REO_CMD_RESOURCE_BLOCKED:
* @HAL_REO_CMD_DRAIN:
*/
enum hal_reo_cmd_status {
HAL_REO_CMD_SUCCESS = 0,
HAL_REO_CMD_BLOCKED = 1,
HAL_REO_CMD_FAILED = 2,
HAL_REO_CMD_RESOURCE_BLOCKED = 3,
HAL_REO_CMD_DRAIN = 0xff,
};
struct hal_wbm_idle_scatter_list {
dma_addr_t paddr;
struct hal_wbm_link_desc *vaddr;
};
struct hal_srng_params {
dma_addr_t ring_base_paddr;
u32 *ring_base_vaddr;
int num_entries;
u32 intr_batch_cntr_thres_entries;
u32 intr_timer_thres_us;
u32 flags;
u32 max_buffer_len;
u32 low_threshold;
/* Add more params as needed */
};
enum hal_srng_dir {
HAL_SRNG_DIR_SRC,
HAL_SRNG_DIR_DST
};
/* srng flags */
#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
/* Common SRNG ring structure for source and destination rings */
struct hal_srng {
/* Unique SRNG ring ID */
u8 ring_id;
/* Ring initialization done */
u8 initialized;
/* Interrupt/MSI value assigned to this ring */
int irq;
/* Physical base address of the ring */
dma_addr_t ring_base_paddr;
/* Virtual base address of the ring */
u32 *ring_base_vaddr;
/* Number of entries in ring */
u32 num_entries;
/* Ring size */
u32 ring_size;
/* Ring size mask */
u32 ring_size_mask;
/* Size of ring entry */
u32 entry_size;
/* Interrupt timer threshold - in micro seconds */
u32 intr_timer_thres_us;
/* Interrupt batch counter threshold - in number of ring entries */
u32 intr_batch_cntr_thres_entries;
/* MSI Address */
dma_addr_t msi_addr;
/* MSI data */
u32 msi_data;
/* Misc flags */
u32 flags;
/* Lock for serializing ring index updates */
spinlock_t lock;
/* Start offset of SRNG register groups for this ring
* TBD: See if this is required - register address can be derived
* from ring ID
*/
u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
/* Source or Destination ring */
enum hal_srng_dir ring_dir;
union {
struct {
/* SW tail pointer */
u32 tp;
/* Shadow head pointer location to be updated by HW */
volatile u32 *hp_addr;
/* Cached head pointer */
u32 cached_hp;
/* Tail pointer location to be updated by SW - This
* will be a register address and need not be
* accessed through SW structure
*/
u32 *tp_addr;
/* Current SW loop cnt */
u32 loop_cnt;
/* max transfer size */
u16 max_buffer_length;
} dst_ring;
struct {
/* SW head pointer */
u32 hp;
/* SW reap head pointer */
u32 reap_hp;
/* Shadow tail pointer location to be updated by HW */
u32 *tp_addr;
/* Cached tail pointer */
u32 cached_tp;
/* Head pointer location to be updated by SW - This
* will be a register address and need not be accessed
* through SW structure
*/
u32 *hp_addr;
/* Low threshold - in number of ring entries */
u32 low_threshold;
} src_ring;
} u;
};
/* Interrupt mitigation - Batch threshold in terms of number of frames */
#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
/* Interrupt mitigation - timer threshold in us */
#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 1000
/* HW SRNG configuration table */
struct hal_srng_config {
int start_ring_id;
u16 max_rings;
u16 entry_size;
u32 reg_start[HAL_SRNG_NUM_REG_GRP];
u16 reg_size[HAL_SRNG_NUM_REG_GRP];
u8 lmac_ring;
enum hal_srng_dir ring_dir;
u32 max_size;
};
/**
* enum hal_rx_buf_return_buf_manager
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
* @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
* descriptor list.
* @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
* @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
*/
enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_FW_BM,
HAL_RX_BUF_RBM_SW0_BM,
HAL_RX_BUF_RBM_SW1_BM,
HAL_RX_BUF_RBM_SW2_BM,
HAL_RX_BUF_RBM_SW3_BM,
};
#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
#define HAL_REO_CMD_UPD0_VLD BIT(9)
#define HAL_REO_CMD_UPD0_ALDC BIT(10)
#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
#define HAL_REO_CMD_UPD0_AC BIT(13)
#define HAL_REO_CMD_UPD0_BAR BIT(14)
#define HAL_REO_CMD_UPD0_RETRY BIT(15)
#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
#define HAL_REO_CMD_UPD0_SVLD BIT(25)
#define HAL_REO_CMD_UPD0_SSN BIT(26)
#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
#define HAL_REO_CMD_UPD0_PN BIT(30)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
#define HAL_REO_CMD_UPD1_VLD BIT(16)
#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
#define HAL_REO_CMD_UPD1_BAR BIT(23)
#define HAL_REO_CMD_UPD1_RETRY BIT(24)
#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
#define HAL_REO_CMD_UPD2_SVLD BIT(10)
#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8)
struct ath11k_hal_reo_cmd {
u32 addr_lo;
u32 flag;
u32 upd0;
u32 upd1;
u32 upd2;
u32 pn[4];
u16 rx_queue_num;
u16 min_rel;
u16 min_fwd;
u8 addr_hi;
u8 ac_list;
u8 blocking_idx;
u16 ba_window_size;
u8 pn_size;
};
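/* Example (hypothetical caller): for HAL_REO_CMD_UPDATE_RX_QUEUE, each
 * HAL_REO_CMD_UPD0_ bit selects a field to update while the matching bit
 * in upd1 or upd2 carries the new value, e.g. marking a queue invalid:
 *
 *	struct ath11k_hal_reo_cmd cmd = { };
 *
 *	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 *	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
 *	cmd.upd1 &= ~HAL_REO_CMD_UPD1_VLD;
 */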
enum hal_pn_type {
HAL_PN_TYPE_NONE,
HAL_PN_TYPE_WPA,
HAL_PN_TYPE_WAPI_EVEN,
HAL_PN_TYPE_WAPI_UNEVEN,
};
enum hal_ce_desc {
HAL_CE_DESC_SRC,
HAL_CE_DESC_DST,
HAL_CE_DESC_DST_STATUS,
};
struct hal_reo_status_header {
u16 cmd_num;
enum hal_reo_cmd_status cmd_status;
u16 cmd_exe_time;
u32 timestamp;
};
struct hal_reo_status_queue_stats {
u16 ssn;
u16 curr_idx;
u32 pn[4];
u32 last_rx_queue_ts;
u32 last_rx_dequeue_ts;
u32 rx_bitmap[8]; /* Bitmap from 0-255 */
u32 curr_mpdu_cnt;
u32 curr_msdu_cnt;
u16 fwd_due_to_bar_cnt;
u16 dup_cnt;
u32 frames_in_order_cnt;
u32 num_mpdu_processed_cnt;
u32 num_msdu_processed_cnt;
u32 total_num_processed_byte_cnt;
u32 late_rx_mpdu_cnt;
u32 reorder_hole_cnt;
u8 timeout_cnt;
u8 bar_rx_cnt;
u8 num_window_2k_jump_cnt;
};
struct hal_reo_status_flush_queue {
bool err_detected;
};
enum hal_reo_status_flush_cache_err_code {
HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
};
struct hal_reo_status_flush_cache {
bool err_detected;
enum hal_reo_status_flush_cache_err_code err_code;
bool cache_controller_flush_status_hit;
u8 cache_controller_flush_status_desc_type;
u8 cache_controller_flush_status_client_id;
u8 cache_controller_flush_status_err;
u8 cache_controller_flush_status_cnt;
};
enum hal_reo_status_unblock_cache_type {
HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
};
struct hal_reo_status_unblock_cache {
bool err_detected;
enum hal_reo_status_unblock_cache_type unblock_type;
};
struct hal_reo_status_flush_timeout_list {
bool err_detected;
bool list_empty;
u16 release_desc_cnt;
u16 fwd_buf_cnt;
};
enum hal_reo_threshold_idx {
HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
};
struct hal_reo_status_desc_thresh_reached {
enum hal_reo_threshold_idx threshold_idx;
u32 link_desc_counter0;
u32 link_desc_counter1;
u32 link_desc_counter2;
u32 link_desc_counter_sum;
};
struct hal_reo_status {
struct hal_reo_status_header uniform_hdr;
u8 loop_cnt;
union {
struct hal_reo_status_queue_stats queue_stats;
struct hal_reo_status_flush_queue flush_queue;
struct hal_reo_status_flush_cache flush_cache;
struct hal_reo_status_unblock_cache unblock_cache;
struct hal_reo_status_flush_timeout_list timeout_list;
struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
} u;
};
/**
* HAL context to be used to access SRNG APIs (currently used by data path
* and transport (CE) modules)
*/
struct ath11k_hal {
/* HAL internal state for all SRNG rings.
*/
struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
/* SRNG configuration table */
const struct hal_srng_config *srng_config;
/* Remote pointer memory for HW/FW updates */
struct {
u32 *vaddr;
dma_addr_t paddr;
} rdp;
/* Shared memory for ring pointer updates from host to FW */
struct {
u32 *vaddr;
dma_addr_t paddr;
} wrp;
/* Available REO blocking resources bitmap */
u8 avail_blk_resource;
u8 current_blk_index;
/* shadow register configuration */
u32 shadow_reg_addr[SHADOW_NUM_REGISTERS];
int num_shadow_reg_configured;
};
u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
u32 start_seqtype);
void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
struct hal_srng *srng);
void ath11k_hal_reo_hw_setup(struct ath11k_base *ab);
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
struct hal_wbm_idle_scatter_list *sbuf,
u32 nsbufs, u32 tot_link_desc,
u32 end_offset);
dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
struct hal_srng *srng);
dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
struct hal_srng *srng);
void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
dma_addr_t paddr);
u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type);
void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
u8 byte_swap_data);
void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
u32 ath11k_hal_ce_dst_status_get_length(void *buf);
int ath11k_hal_srng_get_entrysize(u32 ring_type);
int ath11k_hal_srng_get_max_entries(u32 ring_type);
void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params);
u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
struct hal_srng *srng);
u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
struct hal_srng *srng);
u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
struct hal_srng *srng);
u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
struct hal_srng *srng);
int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
void ath11k_hal_srng_access_begin(struct ath11k_base *ab,
struct hal_srng *srng);
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng);
int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
int ring_num, int mac_id,
struct hal_srng_params *params);
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
#endif
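
Taken together, the accessors above imply a simple consume loop for a destination ring. A minimal sketch, assuming the caller takes the per-ring spinlock as documented in struct hal_srng (the function name is hypothetical):

	static void example_drain_dst_ring(struct ath11k_base *ab,
					   struct hal_srng *srng)
	{
		u32 *desc;

		spin_lock_bh(&srng->lock);
		ath11k_hal_srng_access_begin(ab, srng);

		/* returns the next unprocessed descriptor, or NULL when empty */
		while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
			/* parse the ring entry pointed to by desc here */
		}

		/* publishes the updated software pointer back to hardware */
		ath11k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
	}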

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,332 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_HAL_RX_H
#define ATH11K_HAL_RX_H
struct hal_rx_wbm_rel_info {
u32 cookie;
enum hal_wbm_rel_src_module err_rel_src;
enum hal_reo_dest_ring_push_reason push_reason;
u32 err_code;
bool first_msdu;
bool last_msdu;
};
#define HAL_INVALID_PEERID 0xffff
#define VHT_SIG_SU_NSS_MASK 0x7
#define HAL_RX_MAX_MCS 12
#define HAL_RX_MAX_NSS 8
struct hal_rx_mon_status_tlv_hdr {
u32 hdr;
u8 value[0];
};
enum hal_rx_su_mu_coding {
HAL_RX_SU_MU_CODING_BCC,
HAL_RX_SU_MU_CODING_LDPC,
HAL_RX_SU_MU_CODING_MAX,
};
enum hal_rx_gi {
HAL_RX_GI_0_8_US,
HAL_RX_GI_0_4_US,
HAL_RX_GI_1_6_US,
HAL_RX_GI_3_2_US,
HAL_RX_GI_MAX,
};
enum hal_rx_bw {
HAL_RX_BW_20MHZ,
HAL_RX_BW_40MHZ,
HAL_RX_BW_80MHZ,
HAL_RX_BW_160MHZ,
HAL_RX_BW_MAX,
};
enum hal_rx_preamble {
HAL_RX_PREAMBLE_11A,
HAL_RX_PREAMBLE_11B,
HAL_RX_PREAMBLE_11N,
HAL_RX_PREAMBLE_11AC,
HAL_RX_PREAMBLE_11AX,
HAL_RX_PREAMBLE_MAX,
};
enum hal_rx_reception_type {
HAL_RX_RECEPTION_TYPE_SU,
HAL_RX_RECEPTION_TYPE_MU_MIMO,
HAL_RX_RECEPTION_TYPE_MU_OFDMA,
HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
HAL_RX_RECEPTION_TYPE_MAX,
};
#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
#define HAL_TLV_STATUS_PPDU_DONE 1
#define HAL_TLV_STATUS_BUF_DONE 2
#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
#define HAL_RX_FCS_LEN 4
enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_NOT_DONE,
HAL_RX_MON_STATUS_PPDU_DONE,
HAL_RX_MON_STATUS_BUF_DONE,
};
struct hal_rx_mon_ppdu_info {
u32 ppdu_id;
u32 ppdu_ts;
u32 num_mpdu_fcs_ok;
u32 num_mpdu_fcs_err;
u32 preamble_type;
u16 chan_num;
u16 tcp_msdu_count;
u16 tcp_ack_msdu_count;
u16 udp_msdu_count;
u16 other_msdu_count;
u16 peer_id;
u8 rate;
u8 mcs;
u8 nss;
u8 bw;
u8 is_stbc;
u8 gi;
u8 ldpc;
u8 beamformed;
u8 rssi_comb;
u8 tid;
u8 reception_type;
u64 rx_duration;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
struct hal_rx_ppdu_start {
__le32 info0;
__le32 chan_num;
__le32 ppdu_start_ts;
} __packed;
#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
__le32 info0;
__le32 info1;
__le32 info2;
__le32 info3;
__le32 ht_ctrl;
__le32 rsvd1[2];
__le32 info4;
__le32 info5;
__le32 info6;
__le32 rsvd2[11];
} __packed;
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
struct hal_rx_ht_sig_info {
__le32 info0;
__le32 info1;
} __packed;
#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
struct hal_rx_lsig_b_info {
__le32 info0;
} __packed;
#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
struct hal_rx_lsig_a_info {
__le32 info0;
} __packed;
#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
struct hal_rx_vht_sig_a_info {
__le32 info0;
__le32 info1;
} __packed;
enum hal_rx_vht_sig_a_gi_setting {
HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
HAL_RX_VHT_SIG_A_SHORT_GI = 1,
HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
};
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
struct hal_rx_he_sig_a_su_info {
__le32 info0;
__le32 info1;
} __packed;
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
struct hal_rx_he_sig_a_mu_dl_info {
__le32 info0;
__le32 info1;
} __packed;
#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
struct hal_rx_he_sig_b1_mu_info {
__le32 info0;
} __packed;
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
struct hal_rx_he_sig_b2_mu_info {
__le32 info0;
} __packed;
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
struct hal_rx_he_sig_b2_ofdma_info {
__le32 info0;
} __packed;
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
struct hal_rx_phyrx_rssi_legacy_info {
__le32 rsvd[35];
__le32 info0;
} __packed;
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
struct hal_rx_mpdu_info {
__le32 rsvd0;
__le32 info0;
__le32 rsvd1[21];
} __packed;
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
__le32 info0;
__le32 rsvd1[4];
} __packed;
struct hal_rx_rxpcu_classification_overview {
u32 rsvd0;
} __packed;
struct hal_rx_msdu_desc_info {
u32 msdu_flags;
u16 msdu_len; /* 14 bits for length */
};
#define HAL_RX_NUM_MSDU_DESC 6
struct hal_rx_msdu_list {
struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
u8 rbm[HAL_RX_NUM_MSDU_DESC];
};
void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status);
void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status);
void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status);
void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status);
void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
u32 *reo_desc,
struct hal_reo_status *status);
void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
u32 *reo_desc,
struct hal_reo_status *status);
void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
u32 *reo_desc,
struct hal_reo_status *status);
int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status);
void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
u32 *msdu_cookies,
enum hal_rx_buf_return_buf_manager *rbm);
void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
void *link_desc,
enum hal_wbm_rel_bm_act action);
void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
u32 cookie, u8 manager);
void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
u32 *cookie, u8 *rbm);
int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
dma_addr_t *paddr, u32 *desc_bank);
int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
struct hal_rx_wbm_rel_info *rel_info);
void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
dma_addr_t *paddr, u32 *desc_bank);
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
dma_addr_t *paddr, u32 *sw_cookie,
void **pp_buf_addr_info,
u32 *msdu_cnt);
enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct sk_buff *skb);
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
#endif
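
Each GENMASK()/BIT() define above pairs with FIELD_GET() when parsing the little-endian TLV payloads. For instance, extracting the bandwidth and MCS from a VHT SIG A TLV might look like the following sketch (tlv_data is an assumed pointer into a received status buffer):

	struct hal_rx_vht_sig_a_info *vht =
		(struct hal_rx_vht_sig_a_info *)tlv_data;
	u32 info0 = __le32_to_cpu(vht->info0);
	u32 info1 = __le32_to_cpu(vht->info1);
	u8 bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW, info0);
	u8 mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS, info1);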

View File

@ -0,0 +1,226 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "ahb.h"
#include "hal.h"
#include "hal_tx.h"
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
/* dscp_tid_map - Default DSCP-TID mapping
*
* DSCP TID
* 000000 0
* 001000 1
* 010000 2
* 011000 3
* 100000 4
* 101000 5
* 110000 6
* 111000 7
*/
static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7,
};
void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
struct hal_tx_info *ti)
{
struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
tcl_cmd->buf_addr_info.info0 =
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr);
tcl_cmd->buf_addr_info.info1 =
FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
tcl_cmd->buf_addr_info.info1 |=
FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
(ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
tcl_cmd->info0 =
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE,
ti->encrypt_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE,
ti->search_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN,
ti->addr_search_flags) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM,
ti->meta_data_flags);
tcl_cmd->info1 = ti->flags0 |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
tcl_cmd->info2 = ti->flags1 |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
ti->dscp_tid_tbl_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
ti->bss_ast_hash);
}
/* Commit the descriptor to hardware */
void ath11k_hal_tx_desc_sync(void *tx_desc_cached, void *hw_desc)
{
memcpy(hw_desc + sizeof(struct hal_tlv_hdr), tx_desc_cached,
sizeof(struct hal_tcl_data_cmd));
}
/* Get the descriptor status from hardware */
void ath11k_hal_tx_status_desc_sync(void *hw_desc, void *local_desc)
{
memcpy(local_desc, hw_desc, HAL_TX_STATUS_DESC_LEN);
}
void ath11k_hal_tx_status_parse(struct ath11k_base *ab,
struct hal_wbm_release_ring *desc,
struct hal_tx_status *ts)
{
ts->buf_rel_source =
FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
return;
ts->desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
desc->buf_addr_info.info1);
if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
return;
ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
desc->info0);
ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
desc->info1);
ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
desc->info1);
ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
desc->info2);
if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
if (desc->info2 & HAL_WBM_RELEASE_INFO2_LAST_MSDU)
ts->flags |= HAL_TX_STATUS_FLAGS_LAST_MSDU;
if (desc->info2 & HAL_WBM_RELEASE_INFO2_MSDU_IN_AMSDU)
ts->flags |= HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU;
ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
if (!(desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID))
return;
ts->flags |= HAL_TX_STATUS_FLAGS_RATE_STATS_VALID;
ts->tsf = desc->rate_stats.tsf;
ts->bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, desc->rate_stats.info0);
ts->pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
desc->rate_stats.info0);
if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_STBC)
ts->flags |= HAL_TX_STATUS_FLAGS_RATE_STBC;
if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_LDPC)
ts->flags |= HAL_TX_STATUS_FLAGS_RATE_LDPC;
if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_OFDMA_TX)
ts->flags |= HAL_TX_STATUS_FLAGS_OFDMA;
ts->sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
desc->rate_stats.info0);
ts->mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
desc->rate_stats.info0);
ts->num_tones_in_ru = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU,
desc->rate_stats.info0);
}
void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
{
u32 ctrl_reg_val;
u32 addr;
u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE];
int i;
u32 value;
int cnt = 0;
ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
/* Enable read/write access */
ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
(4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
/* Each DSCP-TID mapping is three bits wide, so every iteration packs
 * eight mappings (24 bits) into three bytes; the full 64-entry table
 * therefore occupies HAL_DSCP_TID_TBL_SIZE (24) bytes.
 */
for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
dscp_tid_map[i]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
dscp_tid_map[i + 1]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
dscp_tid_map[i + 2]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
dscp_tid_map[i + 3]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
dscp_tid_map[i + 4]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
dscp_tid_map[i + 5]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
dscp_tid_map[i + 6]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
dscp_tid_map[i + 7]);
memcpy(&hw_map_val[cnt], (u8 *)&value, 3);
cnt += 3;
}
for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
ath11k_ahb_write32(ab, addr, *(u32 *)&hw_map_val[i]);
addr += 4;
}
/* Disable read/write access */
ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG,
ctrl_reg_val);
}
void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
{
struct hal_srng_params params;
struct hal_tlv_hdr *tlv;
int i, entry_size;
u8 *desc;
memset(&params, 0, sizeof(params));
entry_size = ath11k_hal_srng_get_entrysize(HAL_TCL_DATA);
ath11k_hal_srng_get_params(ab, srng, &params);
desc = (u8 *)params.ring_base_vaddr;
for (i = 0; i < params.num_entries; i++) {
tlv = (struct hal_tlv_hdr *)desc;
tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
FIELD_PREP(HAL_TLV_HDR_LEN,
sizeof(struct hal_tcl_data_cmd));
desc += entry_size;
}
}
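
A transmit path is expected to fill a struct hal_tx_info, let ath11k_hal_tx_cmd_desc_setup() translate it into the TCL command words, then commit the result with ath11k_hal_tx_desc_sync(). A hedged sketch of such a caller (paddr, ring_id, and the descriptor pointers are assumed to come from the surrounding code, and the encap value is an assumption about hal_desc.h):

	struct hal_tx_info ti = { };

	ti.paddr = paddr;		/* DMA address of the frame */
	ti.data_len = skb->len;
	ti.ring_id = ring_id;
	ti.desc_id = desc_id;		/* echoed back in the tx status */
	ti.encap_type = HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;

	ath11k_hal_tx_cmd_desc_setup(ab, cached_desc, &ti);
	ath11k_hal_tx_desc_sync(cached_desc, hw_desc);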

View File

@ -0,0 +1,80 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
#define ATH11K_HAL_TX_H
#include "hal_desc.h"
#define HAL_TX_ADDRX_EN 1
#define HAL_TX_ADDRY_EN 2
#define HAL_TX_ADDR_SEARCH_DEFAULT 0
#define HAL_TX_ADDR_SEARCH_INDEX 1
struct hal_tx_info {
u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
u8 ring_id;
u32 desc_id;
enum hal_tcl_desc_type type;
enum hal_tcl_encap_type encap_type;
dma_addr_t paddr;
u32 data_len;
u32 pkt_offset;
enum hal_encrypt_type encrypt_type;
u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
u16 bss_ast_hash;
u8 tid;
u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
u8 lmac_id;
u8 dscp_tid_tbl_idx;
};
/* TODO: Check if the actual desc macros can be used instead */
#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
/* Tx status parsed from srng desc */
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
u32 desc_id;
enum hal_wbm_tqm_rel_reason status;
u8 ack_rssi;
enum hal_tx_rate_stats_bw bw;
enum hal_tx_rate_stats_pkt_type pkt_type;
enum hal_tx_rate_stats_sgi sgi;
u8 mcs;
u16 num_tones_in_ru;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 tsf;
u32 ppdu_id;
u8 try_cnt;
u8 tid;
u16 peer_id;
};
void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
struct hal_tx_info *ti);
void ath11k_hal_tx_desc_sync(void *tx_desc_cached, void *hw_desc);
void ath11k_hal_tx_status_parse(struct ath11k_base *ab,
struct hal_wbm_release_ring *desc,
struct hal_tx_status *ts);
void ath11k_hal_tx_status_desc_sync(void *hw_desc, void *local_desc);
void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id);
int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
enum hal_reo_cmd_type type,
struct ath11k_hal_reo_cmd *cmd);
void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab,
struct hal_srng *srng);
#endif
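
After completion, the rate fields of struct hal_tx_status are meaningful only when the rate-stats TLV was valid, so consumers should gate on the flag. A short illustrative check (update_rate_counters() is a hypothetical helper):

	struct hal_tx_status ts = { };

	ath11k_hal_tx_status_parse(ab, desc, &ts);
	if (ts.flags & HAL_TX_STATUS_FLAGS_RATE_STATS_VALID)
		update_rate_counters(ts.mcs, ts.bw, ts.sgi);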

View File

@ -0,0 +1,773 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include "ahb.h"
#include "debug.h"
struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr));
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath11k_warn(ab, "Unaligned HTC tx skb\n");
return skb;
}
static void ath11k_htc_control_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
kfree_skb(skb);
}
static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
{
struct sk_buff *skb;
struct ath11k_skb_cb *skb_cb;
skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE);
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
skb_cb = ATH11K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
ath11k_dbg(ab, ATH11K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
return skb;
}
static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc,
struct sk_buff *skb)
{
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath11k_htc_hdr));
}
static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath11k_htc_hdr *hdr;
hdr = (struct ath11k_htc_hdr *)skb->data;
memset(hdr, 0, sizeof(*hdr));
hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
FIELD_PREP(HTC_HDR_PAYLOADLEN,
(skb->len - sizeof(*hdr))) |
FIELD_PREP(HTC_HDR_FLAGS,
ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
spin_lock_bh(&ep->htc->tx_lock);
hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
spin_unlock_bh(&ep->htc->tx_lock);
}
int ath11k_htc_send(struct ath11k_htc *htc,
enum ath11k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath11k_htc_ep *ep = &htc->endpoint[eid];
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct device *dev = htc->ab->dev;
struct ath11k_base *ab = htc->ab;
int credits = 0;
int ret;
if (eid >= ATH11K_HTC_EP_COUNT) {
ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
skb_push(skb, sizeof(struct ath11k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath11k_dbg(ab, ATH11K_DBG_HTC,
"htc insufficient credits ep %d required %d available %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
ath11k_htc_prepare_tx_skb(ep, skb);
skb_cb->eid = eid;
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret) {
ret = -EIO;
goto err_credits;
}
ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
if (ret)
goto err_unmap;
return 0;
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ab);
}
err_pull:
skb_pull(skb, sizeof(struct ath11k_htc_hdr));
return ret;
}
static void
ath11k_htc_process_credit_report(struct ath11k_htc *htc,
const struct ath11k_htc_credit_report *report,
int len,
enum ath11k_htc_ep_id eid)
{
struct ath11k_base *ab = htc->ab;
struct ath11k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
ath11k_warn(ab, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
spin_lock_bh(&htc->tx_lock);
for (i = 0; i < n_reports; i++, report++) {
if (report->eid >= ATH11K_HTC_EP_COUNT)
break;
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath11k_dbg(ab, ATH11K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ab);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
u8 *buffer,
int length,
enum ath11k_htc_ep_id src_eid)
{
struct ath11k_base *ab = htc->ab;
int status = 0;
struct ath11k_htc_record *record;
size_t len;
while (length > 0) {
record = (struct ath11k_htc_record *)buffer;
if (length < sizeof(record->hdr)) {
status = -EINVAL;
break;
}
if (record->hdr.len > length) {
/* no room left in buffer for record */
ath11k_warn(ab, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
}
switch (record->hdr.id) {
case ATH11K_HTC_RECORD_CREDITS:
len = sizeof(struct ath11k_htc_credit_report);
if (record->hdr.len < len) {
ath11k_warn(ab, "Credit report too long\n");
status = -EINVAL;
break;
}
ath11k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
default:
ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
if (status)
break;
/* multiple records may be present in a trailer */
buffer += sizeof(record->hdr) + record->hdr.len;
length -= sizeof(record->hdr) + record->hdr.len;
}
return status;
}
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
int status = 0;
struct ath11k_htc *htc = &ab->htc;
struct ath11k_htc_hdr *hdr;
struct ath11k_htc_ep *ep;
u16 payload_len;
u32 trailer_len = 0;
size_t min_len;
u8 eid;
bool trailer_present;
hdr = (struct ath11k_htc_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
if (eid >= ATH11K_HTC_EP_COUNT) {
ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
goto out;
}
ep = &htc->endpoint[eid];
payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
ath11k_warn(ab, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
goto out;
}
if (skb->len < payload_len) {
ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
goto out;
}
/* get flags to check for trailer */
trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
ATH11K_HTC_FLAG_TRAILER_PRESENT;
if (trailer_present) {
u8 *trailer;
trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
min_len = sizeof(struct ath11k_htc_record_hdr);
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
ath11k_warn(ab, "Invalid trailer length: %d\n",
trailer_len);
goto out;
}
trailer = (u8 *)hdr;
trailer += sizeof(*hdr);
trailer += payload_len;
trailer -= trailer_len;
status = ath11k_htc_process_trailer(htc, trailer,
trailer_len, eid);
if (status)
goto out;
skb_trim(skb, skb->len - trailer_len);
}
if (trailer_len >= payload_len)
/* zero length packet with trailer data, just drop these */
goto out;
if (eid == ATH11K_HTC_EP_0) {
struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
switch (FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id)) {
case ATH11K_HTC_MSG_READY_ID:
case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/* this is a fatal error, target should not be
* sending unsolicited messages on the ep 0
*/
ath11k_warn(ab, "HTC rx ctrl still processing\n");
complete(&htc->ctl_resp);
goto out;
}
htc->control_resp_len =
min_t(int, skb->len,
ATH11K_HTC_MAX_CTRL_MSG_LEN);
memcpy(htc->control_resp_buffer, skb->data,
htc->control_resp_len);
complete(&htc->ctl_resp);
break;
default:
ath11k_warn(ab, "ignoring unsolicited htc ep0 event\n");
break;
}
goto out;
}
ath11k_dbg(ab, ATH11K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ab, skb);
/* poll tx completion for interrupt disabled CE's */
ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id);
/* skb is now owned by the rx completion handler */
skb = NULL;
out:
kfree_skb(skb);
}
static void ath11k_htc_control_rx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint.
*/
ath11k_warn(ab, "unexpected htc rx\n");
kfree_skb(skb);
}
static const char *htc_service_name(enum ath11k_htc_svc_id id)
{
switch (id) {
case ATH11K_HTC_SVC_ID_RESERVED:
return "Reserved";
case ATH11K_HTC_SVC_ID_RSVD_CTRL:
return "Control";
case ATH11K_HTC_SVC_ID_WMI_CONTROL:
return "WMI";
case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
return "DATA BE";
case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
return "DATA BK";
case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
return "DATA VI";
case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
return "DATA VO";
case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
return "WMI MAC1";
case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
return "WMI MAC2";
case ATH11K_HTC_SVC_ID_NMI_CONTROL:
return "NMI Control";
case ATH11K_HTC_SVC_ID_NMI_DATA:
return "NMI Data";
case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
return "HTT Data";
case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
return "RAW";
case ATH11K_HTC_SVC_ID_IPA_TX:
return "IPA TX";
case ATH11K_HTC_SVC_ID_PKT_LOG:
return "PKT LOG";
}
return "Unknown";
}
static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc)
{
struct ath11k_htc_ep *ep;
int i;
for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
}
}
static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
u16 service_id)
{
u8 i, allocation = 0;
for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
if (htc->service_alloc_table[i].service_id == service_id) {
allocation =
htc->service_alloc_table[i].credit_allocation;
}
}
return allocation;
}
static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
{
struct ath11k_htc_svc_tx_credits *serv_entry;
u32 svc_id[] = {
ATH11K_HTC_SVC_ID_WMI_CONTROL,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
};
int i, credits;
credits = htc->total_transmit_credits;
serv_entry = htc->service_alloc_table;
if ((htc->wmi_ep_count == 0) ||
(htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
return -EINVAL;
/* Divide credits among number of endpoints for WMI */
credits = credits / htc->wmi_ep_count;
for (i = 0; i < htc->wmi_ep_count; i++) {
serv_entry[i].service_id = svc_id[i];
serv_entry[i].credit_allocation = credits;
}
return 0;
}
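/* Illustrative sketch (not part of the driver, hypothetical numbers): if the
* target reported total_transmit_credits = 90 and wmi_ep_count = 3 (DBS_SBS),
* each WMI control service above ends up with 90 / 3 = 30 credits in
* service_alloc_table[0..2].
*/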
int ath11k_htc_wait_target(struct ath11k_htc *htc)
{
int i, status = 0;
struct ath11k_base *ab = htc->ab;
unsigned long time_left;
struct ath11k_htc_ready *ready;
u16 message_id;
u16 credit_count;
u16 credit_size;
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH11K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left) {
ath11k_warn(ab, "failed to receive control response completion, polling..\n");
for (i = 0; i < CE_COUNT; i++)
ath11k_ce_per_engine_service(htc->ab, i);
time_left =
wait_for_completion_timeout(&htc->ctl_resp,
ATH11K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left)
status = -ETIMEDOUT;
}
if (status < 0) {
ath11k_warn(ab, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(*ready)) {
ath11k_warn(ab, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
ready->id_credit_count);
credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
if (message_id != ATH11K_HTC_MSG_READY_ID) {
ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
htc->total_transmit_credits = credit_count;
htc->target_credit_size = credit_size;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
htc->total_transmit_credits, htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
ath11k_warn(ab, "Invalid credit size received\n");
return -ECOMM;
}
ath11k_htc_setup_target_buffer_assignments(htc);
return 0;
}
int ath11k_htc_connect_service(struct ath11k_htc *htc,
struct ath11k_htc_svc_conn_req *conn_req,
struct ath11k_htc_svc_conn_resp *conn_resp)
{
struct ath11k_base *ab = htc->ab;
struct ath11k_htc_conn_svc *req_msg;
struct ath11k_htc_conn_svc_resp resp_msg_dummy;
struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
struct ath11k_htc_ep *ep;
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
/* special case for HTC pseudo control service */
if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
disable_credit_flow_ctrl = true;
assigned_eid = ATH11K_HTC_EP_0;
max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
goto setup;
}
tx_alloc = ath11k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
if (!skb) {
ath11k_warn(ab, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
length = sizeof(*req_msg);
skb_put(skb, length);
memset(skb->data, 0, length);
req_msg = (struct ath11k_htc_conn_svc *)skb->data;
req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
/* Only enable credit flow control for WMI ctrl service */
if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
conn_req->service_id);
reinit_completion(&htc->ctl_resp);
status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
/* wait for response */
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH11K_HTC_CONN_SVC_TIMEOUT_HZ);
if (!time_left) {
ath11k_err(ab, "Service connect timeout\n");
return -ETIMEDOUT;
}
/* we controlled the buffer creation, it's aligned */
resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
resp_msg->msg_svc_id);
if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(*resp_msg))) {
ath11k_err(ab, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
ath11k_dbg(ab, ATH11K_DBG_HTC,
"HTC Service %s connect response: status: 0x%lx, assigned ep: 0x%lx\n",
htc_service_name(service_id),
FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
resp_msg->flags_len);
/* check response status */
if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
ath11k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
conn_resp->connect_resp_code);
return -EPROTO;
}
assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
HTC_SVC_RESP_MSG_ENDPOINTID,
resp_msg->flags_len);
max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
resp_msg->flags_len);
setup:
if (assigned_eid >= ATH11K_HTC_EP_COUNT)
return -EPROTO;
if (max_msg_size == 0)
return -EPROTO;
ep = &htc->endpoint[assigned_eid];
ep->eid = assigned_eid;
if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
return -EPROTO;
/* return assigned endpoint to caller */
conn_resp->eid = assigned_eid;
conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
resp_msg->flags_len);
/* setup the endpoint */
ep->service_id = conn_req->service_id;
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
resp_msg->flags_len);
ep->tx_credits = tx_alloc;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
status = ath11k_ahb_map_service_to_pipe(htc->ab,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status)
return status;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
return status;
}
int ath11k_htc_start(struct ath11k_htc *htc)
{
struct sk_buff *skb;
int status = 0;
struct ath11k_base *ab = htc->ab;
struct ath11k_htc_setup_complete_extended *msg;
skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
if (!skb)
return -ENOMEM;
skb_put(skb, sizeof(*msg));
memset(skb->data, 0, skb->len);
msg = (struct ath11k_htc_setup_complete_extended *)skb->data;
msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
return 0;
}
int ath11k_htc_init(struct ath11k_base *ab)
{
struct ath11k_htc *htc = &ab->htc;
struct ath11k_htc_svc_conn_req conn_req;
struct ath11k_htc_svc_conn_resp conn_resp;
int ret;
spin_lock_init(&htc->tx_lock);
ath11k_htc_reset_endpoint_states(htc);
htc->ab = ab;
switch (ab->wmi_sc.preferred_hw_mode) {
case WMI_HOST_HW_MODE_SINGLE:
htc->wmi_ep_count = 1;
break;
case WMI_HOST_HW_MODE_DBS:
case WMI_HOST_HW_MODE_DBS_OR_SBS:
htc->wmi_ep_count = 2;
break;
case WMI_HOST_HW_MODE_DBS_SBS:
htc->wmi_ep_count = 3;
break;
default:
htc->wmi_ep_count = 3;
break;
}
/* setup our pseudo HTC control endpoint connection */
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete;
conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
/* connect fake service */
ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp);
if (ret) {
ath11k_err(ab, "could not connect to htc service (%d)\n", ret);
return ret;
}
init_completion(&htc->ctl_resp);
return 0;
}

View File

@ -0,0 +1,313 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_HTC_H
#define ATH11K_HTC_H
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
struct ath11k_base;
#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
#define HTC_HDR_FLAGS GENMASK(15, 8)
#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
#define HTC_HDR_RESERVED GENMASK(31, 16)
#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
#define HTC_MSG_MESSAGEID GENMASK(15, 0)
#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
enum ath11k_htc_tx_flags {
ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
ATH11K_HTC_FLAG_SEND_BUNDLE = 0x02
};
enum ath11k_htc_rx_flags {
ATH11K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH11K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
struct ath11k_htc_hdr {
u32 htc_info;
u32 ctrl_info;
} __packed __aligned(4);
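/* Decoding sketch (illustrative, mirroring ath11k_htc_rx_completion_handler()
* above): the rx path unpacks these two words with FIELD_GET(), e.g.
*
* eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
* payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
* trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
*/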
enum ath11k_htc_msg_id {
ATH11K_HTC_MSG_READY_ID = 1,
ATH11K_HTC_MSG_CONNECT_SERVICE_ID = 2,
ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
ATH11K_HTC_MSG_SETUP_COMPLETE_ID = 4,
ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
};
enum ath11k_htc_version {
ATH11K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
ATH11K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
};
#define ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
#define ATH11K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
enum ath11k_htc_conn_flags {
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
};
enum ath11k_htc_conn_svc_status {
ATH11K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
ATH11K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
ATH11K_HTC_CONN_SVC_STATUS_FAILED = 2,
ATH11K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
ATH11K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
struct ath11k_htc_ready {
u32 id_credit_count;
u32 size_ep;
} __packed;
struct ath11k_htc_ready_extended {
struct ath11k_htc_ready base;
u32 ver_bundle;
} __packed;
struct ath11k_htc_conn_svc {
u32 msg_svc_id;
u32 flags_len;
} __packed;
struct ath11k_htc_conn_svc_resp {
u32 msg_svc_id;
u32 flags_len;
u32 svc_meta_pad;
} __packed;
struct ath11k_htc_setup_complete_extended {
u32 msg_id;
u32 flags;
u32 max_msgs_per_bundled_recv;
} __packed;
struct ath11k_htc_msg {
u32 msg_svc_id;
u32 flags_len;
} __packed __aligned(4);
enum ath11k_htc_record_id {
ATH11K_HTC_RECORD_NULL = 0,
ATH11K_HTC_RECORD_CREDITS = 1
};
struct ath11k_htc_record_hdr {
u8 id; /* @enum ath11k_htc_record_id */
u8 len;
u8 pad0;
u8 pad1;
} __packed;
struct ath11k_htc_credit_report {
u8 eid; /* @enum ath11k_htc_ep_id */
u8 credits;
u8 pad0;
u8 pad1;
} __packed;
struct ath11k_htc_record {
struct ath11k_htc_record_hdr hdr;
union {
struct ath11k_htc_credit_report credit_report[0];
u8 payload[0];
};
} __packed __aligned(4);
/* note: the trailer offset is dynamic depending
* on payload length. this is only a struct layout draft
*/
struct ath11k_htc_frame {
struct ath11k_htc_hdr hdr;
union {
struct ath11k_htc_msg msg;
u8 payload[0];
};
struct ath11k_htc_record trailer[0];
} __packed __aligned(4);
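/* Trailer offset sketch (illustrative, matching the rx handler's pointer
* arithmetic): the trailer occupies the last trailer_len bytes of the
* payload, i.e.
*
* u8 *trailer = (u8 *)hdr + sizeof(*hdr) + payload_len - trailer_len;
*
* where trailer_len comes from HTC_HDR_CONTROLBYTES0 of ctrl_info.
*/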
enum ath11k_htc_svc_gid {
ATH11K_HTC_SVC_GRP_RSVD = 0,
ATH11K_HTC_SVC_GRP_WMI = 1,
ATH11K_HTC_SVC_GRP_NMI = 2,
ATH11K_HTC_SVC_GRP_HTT = 3,
ATH11K_HTC_SVC_GRP_CFG = 4,
ATH11K_HTC_SVC_GRP_IPA = 5,
ATH11K_HTC_SVC_GRP_PKTLOG = 6,
ATH11K_HTC_SVC_GRP_TEST = 254,
ATH11K_HTC_SVC_GRP_LAST = 255,
};
#define SVC(group, idx) \
(int)(((int)(group) << 8) | (int)(idx))
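/* Example expansion: SVC(ATH11K_HTC_SVC_GRP_WMI, 0) == (1 << 8) | 0 == 0x0100,
* which is ATH11K_HTC_SVC_ID_WMI_CONTROL below.
*/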
enum ath11k_htc_svc_id {
/* NOTE: service ID of 0x0000 is reserved and should never be used */
ATH11K_HTC_SVC_ID_RESERVED = 0x0000,
ATH11K_HTC_SVC_ID_UNUSED = ATH11K_HTC_SVC_ID_RESERVED,
ATH11K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH11K_HTC_SVC_GRP_RSVD, 1),
ATH11K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_WMI, 0),
ATH11K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH11K_HTC_SVC_GRP_WMI, 1),
ATH11K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH11K_HTC_SVC_GRP_WMI, 2),
ATH11K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH11K_HTC_SVC_GRP_WMI, 3),
ATH11K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH11K_HTC_SVC_GRP_WMI, 4),
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH11K_HTC_SVC_GRP_WMI, 5),
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH11K_HTC_SVC_GRP_WMI, 6),
ATH11K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_NMI, 0),
ATH11K_HTC_SVC_ID_NMI_DATA = SVC(ATH11K_HTC_SVC_GRP_NMI, 1),
ATH11K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH11K_HTC_SVC_GRP_HTT, 0),
/* raw stream service (i.e. flash, tcmd, calibration apps) */
ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH11K_HTC_SVC_GRP_TEST, 0),
ATH11K_HTC_SVC_ID_IPA_TX = SVC(ATH11K_HTC_SVC_GRP_IPA, 0),
ATH11K_HTC_SVC_ID_PKT_LOG = SVC(ATH11K_HTC_SVC_GRP_PKTLOG, 0),
};
#undef SVC
enum ath11k_htc_ep_id {
ATH11K_HTC_EP_UNUSED = -1,
ATH11K_HTC_EP_0 = 0,
ATH11K_HTC_EP_1 = 1,
ATH11K_HTC_EP_2,
ATH11K_HTC_EP_3,
ATH11K_HTC_EP_4,
ATH11K_HTC_EP_5,
ATH11K_HTC_EP_6,
ATH11K_HTC_EP_7,
ATH11K_HTC_EP_8,
ATH11K_HTC_EP_COUNT,
};
struct ath11k_htc_ops {
void (*target_send_suspend_complete)(struct ath11k_base *ar);
};
struct ath11k_htc_ep_ops {
void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *);
void (*ep_tx_credits)(struct ath11k_base *);
};
/* service connection information */
struct ath11k_htc_svc_conn_req {
u16 service_id;
struct ath11k_htc_ep_ops ep_ops;
int max_send_queue_depth;
};
/* service connection response information */
struct ath11k_htc_svc_conn_resp {
u8 buffer_len;
u8 actual_len;
enum ath11k_htc_ep_id eid;
unsigned int max_msg_len;
u8 connect_resp_code;
};
#define ATH11K_NUM_CONTROL_TX_BUFFERS 2
#define ATH11K_HTC_MAX_LEN 4096
#define ATH11K_HTC_MAX_CTRL_MSG_LEN 256
#define ATH11K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
#define ATH11K_HTC_CONTROL_BUFFER_SIZE (ATH11K_HTC_MAX_CTRL_MSG_LEN + \
sizeof(struct ath11k_htc_hdr))
#define ATH11K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
#define ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
struct ath11k_htc_ep {
struct ath11k_htc *htc;
enum ath11k_htc_ep_id eid;
enum ath11k_htc_svc_id service_id;
struct ath11k_htc_ep_ops ep_ops;
int max_tx_queue_depth;
int max_ep_message_len;
u8 ul_pipe_id;
u8 dl_pipe_id;
u8 seq_no; /* for debugging */
int tx_credits;
bool tx_credit_flow_enabled;
};
struct ath11k_htc_svc_tx_credits {
u16 service_id;
u8 credit_allocation;
};
struct ath11k_htc {
struct ath11k_base *ab;
struct ath11k_htc_ep endpoint[ATH11K_HTC_EP_COUNT];
/* protects endpoints */
spinlock_t tx_lock;
struct ath11k_htc_ops htc_ops;
u8 control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN];
int control_resp_len;
struct completion ctl_resp;
int total_transmit_credits;
struct ath11k_htc_svc_tx_credits
service_alloc_table[ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
int target_credit_size;
u8 wmi_ep_count;
};
int ath11k_htc_init(struct ath11k_base *ar);
int ath11k_htc_wait_target(struct ath11k_htc *htc);
int ath11k_htc_start(struct ath11k_htc *htc);
int ath11k_htc_connect_service(struct ath11k_htc *htc,
struct ath11k_htc_svc_conn_req *conn_req,
struct ath11k_htc_svc_conn_resp *conn_resp);
int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
struct sk_buff *packet);
struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size);
void ath11k_htc_rx_completion_handler(struct ath11k_base *ar,
struct sk_buff *skb);
#endif

View File

@ -0,0 +1,127 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_HW_H
#define ATH11K_HW_H
/* Target configuration defines */
/* Num VDEVS per radio */
#define TARGET_NUM_VDEVS (16 + 1)
#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
/* Num of peers for Single Radio mode */
#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
/* Num of peers for DBS */
#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
/* Num of peers for DBS_SBS */
#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
/* Max num of stations (per radio) */
#define TARGET_NUM_STATIONS 512
#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
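/* Example expansion: TARGET_NUM_PEERS(DBS) pastes to TARGET_NUM_PEERS_DBS,
* i.e. 2 * (512 + 17) = 1058 peers.
*/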
#define TARGET_NUM_PEER_KEYS 2
#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
4 * TARGET_NUM_VDEVS + 8)
#define TARGET_AST_SKID_LIMIT 16
#define TARGET_NUM_OFFLD_PEERS 4
#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
#define TARGET_DECAP_MODE_RAW 0
#define TARGET_DECAP_MODE_NATIVE_WIFI 1
#define TARGET_DECAP_MODE_ETH 2
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_NUM_MCAST_GROUPS 12
#define TARGET_NUM_MCAST_TABLE_ELEMS 64
#define TARGET_MCAST2UCAST_MODE 2
#define TARGET_TX_DBG_LOG_SIZE 1024
#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_VOW_CONFIG 0
#define TARGET_NUM_MSDU_DESC (2500)
#define TARGET_MAX_FRAG_ENTRIES 6
#define TARGET_MAX_BCN_OFFLD 16
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
#define ATH11K_HW_MAX_QUEUES 4
#define ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
#define ATH11K_FW_DIR "ath11k"
/* IPQ8074 definitions */
#define IPQ8074_FW_DIR "IPQ8074"
#define IPQ8074_MAX_BOARD_DATA_SZ (256 * 1024)
#define IPQ8074_MAX_CAL_DATA_SZ IPQ8074_MAX_BOARD_DATA_SZ
#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
#define ATH11K_BOARD_API2_FILE "board-2.bin"
#define ATH11K_DEFAULT_BOARD_FILE "bdwlan.bin"
#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
enum ath11k_hw_rate_cck {
ATH11K_HW_RATE_CCK_LP_11M = 0,
ATH11K_HW_RATE_CCK_LP_5_5M,
ATH11K_HW_RATE_CCK_LP_2M,
ATH11K_HW_RATE_CCK_LP_1M,
ATH11K_HW_RATE_CCK_SP_11M,
ATH11K_HW_RATE_CCK_SP_5_5M,
ATH11K_HW_RATE_CCK_SP_2M,
};
enum ath11k_hw_rate_ofdm {
ATH11K_HW_RATE_OFDM_48M = 0,
ATH11K_HW_RATE_OFDM_24M,
ATH11K_HW_RATE_OFDM_12M,
ATH11K_HW_RATE_OFDM_6M,
ATH11K_HW_RATE_OFDM_54M,
ATH11K_HW_RATE_OFDM_36M,
ATH11K_HW_RATE_OFDM_18M,
ATH11K_HW_RATE_OFDM_9M,
};
struct ath11k_hw_params {
const char *name;
struct {
const char *dir;
size_t board_size;
size_t cal_size;
} fw;
};
struct ath11k_fw_ie {
__le32 id;
__le32 len;
u8 data[0];
};
enum ath11k_bd_ie_board_type {
ATH11K_BD_IE_BOARD_NAME = 0,
ATH11K_BD_IE_BOARD_DATA = 1,
};
enum ath11k_bd_ie_type {
/* contains sub IEs of enum ath11k_bd_ie_board_type */
ATH11K_BD_IE_BOARD = 0,
ATH11K_BD_IE_BOARD_EXT = 1,
};
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,147 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_MAC_H
#define ATH11K_MAC_H
#include <net/mac80211.h>
#include <net/cfg80211.h>
struct ath11k;
struct ath11k_base;
struct ath11k_generic_iter {
struct ath11k *ar;
int ret;
};
/* number of failed packets (20 packets with 16 sw retries each) */
#define ATH11K_KICKOUT_THRESHOLD (20 * 16)
/* Use insanely high numbers to make sure that the firmware implementation
* won't start; we already have the same functionality in hostapd. Unit
* is seconds.
*/
#define ATH11K_KEEPALIVE_MIN_IDLE 3747
#define ATH11K_KEEPALIVE_MAX_IDLE 3895
#define ATH11K_KEEPALIVE_MAX_UNRESPONSIVE 3900
#define WMI_HOST_RC_DS_FLAG 0x01
#define WMI_HOST_RC_CW40_FLAG 0x02
#define WMI_HOST_RC_SGI_FLAG 0x04
#define WMI_HOST_RC_HT_FLAG 0x08
#define WMI_HOST_RC_RTSCTS_FLAG 0x10
#define WMI_HOST_RC_TX_STBC_FLAG 0x20
#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
#define WMI_HOST_RC_RX_STBC_FLAG_S 6
#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
#define WMI_HOST_RC_TS_FLAG 0x200
#define WMI_HOST_RC_UAPSD_FLAG 0x400
#define WMI_HT_CAP_ENABLED 0x0001
#define WMI_HT_CAP_HT20_SGI 0x0002
#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
#define WMI_HT_CAP_TX_STBC 0x0008
#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
#define WMI_HT_CAP_RX_STBC 0x0030
#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
#define WMI_HT_CAP_LDPC 0x0040
#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
#define WMI_HT_CAP_MPDU_DENSITY 0x0700
#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
#define WMI_HT_CAP_HT40_SGI 0x0800
#define WMI_HT_CAP_RX_LDPC 0x1000
#define WMI_HT_CAP_TX_LDPC 0x2000
#define WMI_HT_CAP_IBF_BFER 0x4000
/* These macros should be used when we wish to advertise STBC support for
* only 1SS or 2SS or 3SS.
*/
#define WMI_HT_CAP_RX_STBC_1SS 0x0010
#define WMI_HT_CAP_RX_STBC_2SS 0x0020
#define WMI_HT_CAP_RX_STBC_3SS 0x0030
#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
WMI_HT_CAP_HT20_SGI | \
WMI_HT_CAP_HT40_SGI | \
WMI_HT_CAP_TX_STBC | \
WMI_HT_CAP_RX_STBC | \
WMI_HT_CAP_LDPC)
#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
#define WMI_VHT_CAP_RX_LDPC 0x00000010
#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
#define WMI_VHT_CAP_TX_STBC 0x00000080
#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
#define WMI_VHT_CAP_SU_BFER 0x00000800
#define WMI_VHT_CAP_SU_BFEE 0x00001000
#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
#define WMI_VHT_CAP_MU_BFER 0x00080000
#define WMI_VHT_CAP_MU_BFEE 0x00100000
#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIT 23
#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
/* These macros should be used when we wish to advertise STBC support for
* only 1SS or 2SS or 3SS.
*/
#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
WMI_VHT_CAP_SGI_80MHZ | \
WMI_VHT_CAP_TX_STBC | \
WMI_VHT_CAP_RX_STBC_MASK | \
WMI_VHT_CAP_RX_LDPC | \
WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
WMI_VHT_CAP_RX_FIXED_ANT | \
WMI_VHT_CAP_TX_FIXED_ANT)
/* FIXME: should these be in ieee80211.h? */
#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
#define WMI_MAX_SPATIAL_STREAM 3
#define ATH11K_CHAN_WIDTH_NUM 8
extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
void ath11k_mac_destroy(struct ath11k_base *ab);
void ath11k_mac_unregister(struct ath11k_base *ab);
int ath11k_mac_register(struct ath11k_base *ab);
int ath11k_mac_allocate(struct ath11k_base *ab);
int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate);
u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
u32 bitrate);
u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate, bool cck);
void __ath11k_mac_scan_finish(struct ath11k *ar);
void ath11k_mac_scan_finish(struct ath11k *ar);
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
u32 vdev_id);
struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
u32 vdev_id);
void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
#endif

View File

@ -0,0 +1,236 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (memcmp(peer->addr, addr, ETH_ALEN))
continue;
return peer;
}
return NULL;
}
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (memcmp(peer->addr, addr, ETH_ALEN))
continue;
return peer;
}
return NULL;
}
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
int peer_id)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
return NULL;
}
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, peer_id);
if (!peer) {
ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
peer_id);
goto exit;
}
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, peer_id);
list_del(&peer->list);
kfree(peer);
wake_up(&ab->peer_mapping_wq);
exit:
spin_unlock_bh(&ab->base_lock);
}
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash)
{
struct ath11k_peer *peer;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, mac_addr);
if (!peer) {
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
goto exit;
peer->vdev_id = vdev_id;
peer->peer_id = peer_id;
peer->ast_hash = ast_hash;
ether_addr_copy(peer->addr, mac_addr);
list_add(&peer->list, &ab->peers);
wake_up(&ab->peer_mapping_wq);
}
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
vdev_id, mac_addr, peer_id);
exit:
spin_unlock_bh(&ab->base_lock);
}
static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
const u8 *addr, bool expect_mapped)
{
int ret;
ret = wait_event_timeout(ab->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ab->base_lock);
mapped = !!ath11k_peer_find(ab, vdev_id, addr);
spin_unlock_bh(&ab->base_lock);
(mapped == expect_mapped ||
test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
}), 3 * HZ);
if (ret <= 0)
return -ETIMEDOUT;
return 0;
}
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_peer *peer, *tmp;
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ab->base_lock);
}
static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
{
return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
if (ret)
return ret;
ar->num_peers--;
return 0;
}
static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
{
return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param)
{
struct ath11k_peer *peer;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->num_peers > (ar->max_num_peers - 1)) {
ath11k_warn(ar->ab,
"failed to create peer due to insufficient peer entry resource in firmware\n");
return -ENOBUFS;
}
ret = ath11k_wmi_send_peer_create_cmd(ar, param);
if (ret) {
ath11k_warn(ar->ab,
"failed to send peer create vdev_id %d ret %d\n",
param->vdev_id, ret);
return ret;
}
ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
param->peer_addr);
if (ret)
return ret;
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
param->vdev_id);
return -ENOENT;
}
peer->sta = sta;
arvif->ast_hash = peer->ast_hash;
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
return 0;
}

View File

@ -0,0 +1,35 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_PEER_H
#define ATH11K_PEER_H
struct ath11k_peer {
struct list_head list;
struct ieee80211_sta *sta;
int vdev_id;
u8 addr[ETH_ALEN];
int peer_id;
u16 ast_hash;
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
};
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash);
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id);
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr);
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
#endif /* ATH11K_PEER_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,445 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_QMI_H
#define ATH11K_QMI_H
#include <linux/mutex.h>
#include <linux/soc/qcom/qmi.h>
#define ATH11K_HOST_VERSION_STRING "WIN"
#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH11K_QMI_BDF_ADDRESS 0x4B0C0000
#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH11K_QMI_RESP_LEN_MAX 8192
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
#define ATH11K_QMI_CALDB_SIZE 0x480000
#define ATH11K_QMI_DEFAULT_CAL_FILE_NAME "caldata.bin"
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
#define QMI_WLFW_FW_READY_IND_V01 0x0038
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0
struct ath11k_base;
enum ath11k_qmi_file_type {
ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
ATH11K_QMI_FILE_TYPE_CALDATA,
ATH11K_QMI_MAX_FILE_TYPE,
};
enum ath11k_qmi_event_type {
ATH11K_QMI_EVENT_SERVER_ARRIVE,
ATH11K_QMI_EVENT_SERVER_EXIT,
ATH11K_QMI_EVENT_REQUEST_MEM,
ATH11K_QMI_EVENT_FW_MEM_READY,
ATH11K_QMI_EVENT_FW_READY,
ATH11K_QMI_EVENT_COLD_BOOT_CAL_START,
ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE,
ATH11K_QMI_EVENT_REGISTER_DRIVER,
ATH11K_QMI_EVENT_UNREGISTER_DRIVER,
ATH11K_QMI_EVENT_RECOVERY,
ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
ATH11K_QMI_EVENT_POWER_UP,
ATH11K_QMI_EVENT_POWER_DOWN,
ATH11K_QMI_EVENT_MAX,
};
struct ath11k_qmi_driver_event {
struct list_head list;
enum ath11k_qmi_event_type type;
void *data;
};
struct ath11k_qmi_ce_cfg {
const struct ce_pipe_config *tgt_ce;
int tgt_ce_len;
const struct service_to_pipe *svc_to_ce_map;
int svc_to_ce_map_len;
const u8 *shadow_reg;
int shadow_reg_len;
u8 *shadow_reg_v2;
int shadow_reg_v2_len;
};
struct ath11k_qmi_event_msg {
struct list_head list;
enum ath11k_qmi_event_type type;
};
struct target_mem_chunk {
u32 size;
u32 type;
dma_addr_t paddr;
u32 vaddr;
};
struct target_info {
u32 chip_id;
u32 chip_family;
u32 board_id;
u32 soc_id;
u32 fw_version;
char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
};
struct ath11k_qmi {
struct ath11k_base *ab;
struct qmi_handle handle;
struct sockaddr_qrtr sq;
struct work_struct event_work;
struct workqueue_struct *event_wq;
struct list_head event_list;
spinlock_t event_lock; /* spinlock for qmi event list */
struct ath11k_qmi_ce_cfg ce_cfg;
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
u8 cal_done;
struct target_info target;
};
#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
#define QMI_WLFW_MAX_NUM_GPIO_V01 32
#define QMI_IPQ8074_FW_MEM_MODE 0xFF
#define HOST_DDR_REGION_TYPE 0x1
#define BDF_MEM_REGION_TYPE 0x2
#define CALDB_MEM_REGION_TYPE 0x4
struct qmi_wlanfw_host_cap_req_msg_v01 {
u8 num_clients_valid;
u32 num_clients;
u8 wake_msi_valid;
u32 wake_msi;
u8 gpios_valid;
u32 gpios_len;
u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
u8 nm_modem_valid;
u8 nm_modem;
u8 bdf_support_valid;
u8 bdf_support;
u8 bdf_cache_support_valid;
u8 bdf_cache_support;
u8 m3_support_valid;
u8 m3_support;
u8 m3_cache_support_valid;
u8 m3_cache_support;
u8 cal_filesys_support_valid;
u8 cal_filesys_support;
u8 cal_cache_support_valid;
u8 cal_cache_support;
u8 cal_done_valid;
u8 cal_done;
u8 mem_bucket_valid;
u32 mem_bucket;
u8 mem_cfg_mode_valid;
u8 mem_cfg_mode;
};
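/* Layout convention (an observation from the struct above, not a new protocol
* detail): each optional QMI TLV is represented by a *_valid u8 immediately
* followed by its value field, and the TLV is only encoded when *_valid is
* set, e.g.:
*
* req.mem_cfg_mode_valid = 1;
* req.mem_cfg_mode = ATH11K_QMI_TARGET_MEM_MODE_DEFAULT;
*/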
struct qmi_wlanfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
struct qmi_wlanfw_ind_register_req_msg_v01 {
u8 fw_ready_enable_valid;
u8 fw_ready_enable;
u8 initiate_cal_download_enable_valid;
u8 initiate_cal_download_enable;
u8 initiate_cal_update_enable_valid;
u8 initiate_cal_update_enable;
u8 msa_ready_enable_valid;
u8 msa_ready_enable;
u8 pin_connect_result_enable_valid;
u8 pin_connect_result_enable;
u8 client_id_valid;
u32 client_id;
u8 request_mem_enable_valid;
u8 request_mem_enable;
u8 fw_mem_ready_enable_valid;
u8 fw_mem_ready_enable;
u8 fw_init_done_enable_valid;
u8 fw_init_done_enable;
u8 rejuvenate_enable_valid;
u32 rejuvenate_enable;
u8 xo_cal_enable_valid;
u8 xo_cal_enable;
u8 cal_done_enable_valid;
u8 cal_done_enable;
};
struct qmi_wlanfw_ind_register_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 fw_status_valid;
u64 fw_status;
};
#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1124
#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 548
#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
struct qmi_wlanfw_mem_cfg_s_v01 {
u64 offset;
u32 size;
u8 secure_flag;
};
enum qmi_wlanfw_mem_type_enum_v01 {
WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
QMI_WLANFW_MEM_BDF_V01 = 2,
QMI_WLANFW_MEM_M3_V01 = 3,
QMI_WLANFW_MEM_CAL_V01 = 4,
QMI_WLANFW_MEM_DPD_V01 = 5,
WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
};
struct qmi_wlanfw_mem_seg_s_v01 {
u32 size;
enum qmi_wlanfw_mem_type_enum_v01 type;
u32 mem_cfg_len;
struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
};
struct qmi_wlanfw_request_mem_ind_msg_v01 {
u32 mem_seg_len;
struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
};
struct qmi_wlanfw_mem_seg_resp_s_v01 {
u64 addr;
u32 size;
enum qmi_wlanfw_mem_type_enum_v01 type;
u8 restore;
};
struct qmi_wlanfw_respond_mem_req_msg_v01 {
u32 mem_seg_len;
struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
};
struct qmi_wlanfw_respond_mem_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
char placeholder;
};
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
#define QMI_WLANFW_CAP_REQ_V01 0x0024
#define QMI_WLANFW_CAP_RESP_V01 0x0024
enum qmi_wlanfw_pipedir_enum_v01 {
QMI_WLFW_PIPEDIR_NONE_V01 = 0,
QMI_WLFW_PIPEDIR_IN_V01 = 1,
QMI_WLFW_PIPEDIR_OUT_V01 = 2,
QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
};
struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
__le32 pipe_num;
__le32 pipe_dir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
};
struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
__le32 service_id;
__le32 pipe_dir;
__le32 pipe_num;
};
struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
u16 id;
u16 offset;
};
struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 {
u32 addr;
};
struct qmi_wlanfw_memory_region_info_s_v01 {
u64 region_addr;
u32 size;
u8 secure_flag;
};
struct qmi_wlanfw_rf_chip_info_s_v01 {
u32 chip_id;
u32 chip_family;
};
struct qmi_wlanfw_rf_board_info_s_v01 {
u32 board_id;
};
struct qmi_wlanfw_soc_info_s_v01 {
u32 soc_id;
};
struct qmi_wlanfw_fw_version_info_s_v01 {
u32 fw_version;
char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
};
enum qmi_wlanfw_cal_temp_id_enum_v01 {
QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
};
struct qmi_wlanfw_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 chip_info_valid;
struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
u8 board_info_valid;
struct qmi_wlanfw_rf_board_info_s_v01 board_info;
u8 soc_info_valid;
struct qmi_wlanfw_soc_info_s_v01 soc_info;
u8 fw_version_info_valid;
struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
u8 fw_build_id_valid;
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
u8 num_macs_valid;
u8 num_macs;
};
struct qmi_wlanfw_cap_req_msg_v01 {
char placeholder;
};
#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
/* TODO: Need to check with MCL and FW team that data can be a pointer
* and can be the last element in the structure
*/
struct qmi_wlanfw_bdf_download_req_msg_v01 {
u8 valid;
u8 file_id_valid;
enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
u8 total_size_valid;
u32 total_size;
u8 seg_id_valid;
u32 seg_id;
u8 data_valid;
u32 data_len;
u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
u8 end_valid;
u8 end;
u8 bdf_type_valid;
u8 bdf_type;
};
struct qmi_wlanfw_bdf_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
struct qmi_wlanfw_m3_info_req_msg_v01 {
u64 addr;
u32 size;
};
struct qmi_wlanfw_m3_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
#define QMI_WLANFW_MAX_STR_LEN_V01 16
#define QMI_WLANFW_MAX_NUM_CE_V01 12
#define QMI_WLANFW_MAX_NUM_SVC_V01 24
#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 36
struct qmi_wlanfw_wlan_mode_req_msg_v01 {
u32 mode;
u8 hw_debug_valid;
u8 hw_debug;
};
struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
u8 host_version_valid;
char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
u8 tgt_cfg_valid;
u32 tgt_cfg_len;
struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
u8 svc_cfg_valid;
u32 svc_cfg_len;
struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
u8 shadow_reg_valid;
u32 shadow_reg_len;
struct qmi_wlanfw_shadow_reg_cfg_s_v01
shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
u8 shadow_reg_v2_valid;
u32 shadow_reg_v2_len;
struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
shadow_reg_v2[QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01];
};
struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
int ath11k_qmi_firmware_start(struct ath11k_base *ab,
u32 mode);
void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
void ath11k_qmi_event_work(struct work_struct *work);
void ath11k_qmi_msg_recv_work(struct work_struct *work);
void ath11k_qmi_deinit_service(struct ath11k_base *ab);
int ath11k_qmi_init_service(struct ath11k_base *ab);
#endif

View File

@ -0,0 +1,702 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "debug.h"
/* World regdom to be used in case default regd from fw is unavailable */
#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
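/* For reference, cfg80211's REG_RULE(start, end, bw, gain, eirp, flags)
* takes frequencies in MHz, so ATH11K_2GHZ_CH01_11 above covers
* 2402-2472 MHz with a max bandwidth of 40 MHz at 20 dBm EIRP.
*/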
#define ETSI_WEATHER_RADAR_BAND_LOW 5590
#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
static const struct ieee80211_regdomain ath11k_world_regd = {
.n_reg_rules = 3,
.alpha2 = "00",
.reg_rules = {
ATH11K_2GHZ_CH01_11,
ATH11K_5GHZ_5150_5350,
ATH11K_5GHZ_5725_5850,
}
};
static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
{
const struct ieee80211_regdomain *regd;
regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
/* This can happen during wiphy registration where the previous
* user request is received before we update the regd received
* from firmware.
*/
if (!regd)
return true;
return memcmp(regd->alpha2, alpha2, 2) != 0;
}
static void
ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
struct ath11k *ar = hw->priv;
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
/* Currently supporting only General User Hints; cell-based user
* hints are to be handled later.
* Hints from other sources like Core and Beacons are not expected for
* self-managed wiphys.
*/
if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
return;
}
if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Country Setting is not allowed\n");
return;
}
if (!ath11k_regdom_changes(ar, request->alpha2)) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
return;
}
/* Send the country code to the firmware and wait for
* the WMI_REG_CHAN_LIST_CC event to update the
* reg info.
*/
init_country_param.flags = ALPHA_IS_SET;
memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
if (ret)
ath11k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
}
int ath11k_reg_update_chan_list(struct ath11k *ar)
{
struct ieee80211_supported_band **bands;
struct scan_chan_list_params *params;
struct ieee80211_channel *channel;
struct ieee80211_hw *hw = ar->hw;
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
int params_len;
int i, ret;
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
if (bands[band]->channels[i].flags &
IEEE80211_CHAN_DISABLED)
continue;
num_channels++;
}
}
if (WARN_ON(!num_channels))
return -EINVAL;
params_len = sizeof(struct scan_chan_list_params) +
num_channels * sizeof(struct channel_param);
params = kzalloc(params_len, GFP_KERNEL);
if (!params)
return -ENOMEM;
params->pdev_id = ar->pdev->pdev_id;
params->nallchans = num_channels;
ch = params->ch_param;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
channel = &bands[band]->channels[i];
if (channel->flags & IEEE80211_CHAN_DISABLED)
continue;
/* TODO: Set to true/false based on some condition? */
ch->allow_ht = true;
ch->allow_vht = true;
ch->allow_he = true;
ch->dfs_set =
!!(channel->flags & IEEE80211_CHAN_RADAR);
ch->is_chan_passive = !!(channel->flags &
IEEE80211_CHAN_NO_IR);
ch->is_chan_passive |= ch->dfs_set;
ch->mhz = channel->center_freq;
ch->cfreq1 = channel->center_freq;
ch->minpower = 0;
ch->maxpower = channel->max_power * 2;
ch->maxregpower = channel->max_reg_power * 2;
ch->antennamax = channel->max_antenna_gain * 2;
/* TODO: Use appropriate phymodes */
if (channel->band == NL80211_BAND_2GHZ)
ch->phy_mode = MODE_11G;
else
ch->phy_mode = MODE_11A;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
i, params->nallchans,
ch->mhz, ch->maxpower, ch->maxregpower,
ch->antennamax, ch->phy_mode);
ch++;
/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2,
* set_agile, reg_class_idx
*/
}
}
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
return ret;
}
static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
struct ieee80211_regdomain *regd_copy)
{
u8 i;
/* The caller should have checked error conditions */
memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
for (i = 0; i < regd_orig->n_reg_rules; i++)
memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
}
int ath11k_regd_update(struct ath11k *ar, bool init)
{
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath11k_base *ab;
ab = ar->ab;
pdev_id = ar->pdev_idx;
spin_lock(&ab->base_lock);
if (init) {
/* Apply the regd received during init through
* WMI_REG_CHAN_LIST_CC event. In case of failure to
* receive the regd, initialize with a default world
* regulatory.
*/
if (ab->default_regd[pdev_id]) {
regd = ab->default_regd[pdev_id];
} else {
ath11k_warn(ab,
"failed to receive default regd during init\n");
regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
}
} else {
regd = ab->new_regd[pdev_id];
}
if (!regd) {
ret = -EINVAL;
spin_unlock(&ab->base_lock);
goto err;
}
regd_len = sizeof(*regd) + (regd->n_reg_rules *
sizeof(struct ieee80211_reg_rule));
regd_copy = kzalloc(regd_len, GFP_ATOMIC);
if (regd_copy)
ath11k_copy_regd(regd, regd_copy);
spin_unlock(&ab->base_lock);
if (!regd_copy) {
ret = -ENOMEM;
goto err;
}
rtnl_lock();
ret = regulatory_set_wiphy_regd_sync_rtnl(ar->hw->wiphy, regd_copy);
rtnl_unlock();
kfree(regd_copy);
if (ret)
goto err;
if (ar->state == ATH11K_STATE_ON) {
ret = ath11k_reg_update_chan_list(ar);
if (ret)
goto err;
}
return 0;
err:
ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
return ret;
}
static enum nl80211_dfs_regions
ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
{
switch (dfs_region) {
case ATH11K_DFS_REG_FCC:
case ATH11K_DFS_REG_CN:
return NL80211_DFS_FCC;
case ATH11K_DFS_REG_ETSI:
case ATH11K_DFS_REG_KR:
return NL80211_DFS_ETSI;
case ATH11K_DFS_REG_MKK:
return NL80211_DFS_JP;
default:
return NL80211_DFS_UNSET;
}
}
static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
{
u32 flags = 0;
if (reg_flags & REGULATORY_CHAN_NO_IR)
flags = NL80211_RRF_NO_IR;
if (reg_flags & REGULATORY_CHAN_RADAR)
flags |= NL80211_RRF_DFS;
if (reg_flags & REGULATORY_CHAN_NO_OFDM)
flags |= NL80211_RRF_NO_OFDM;
if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
flags |= NL80211_RRF_NO_OUTDOOR;
if (reg_flags & REGULATORY_CHAN_NO_HT40)
flags |= NL80211_RRF_NO_HT40;
if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
flags |= NL80211_RRF_NO_80MHZ;
if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
flags |= NL80211_RRF_NO_160MHZ;
return flags;
}
static bool
ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
if ((start_freq1 >= start_freq2 &&
start_freq1 < end_freq2) ||
(start_freq2 > start_freq1 &&
start_freq2 < end_freq1))
return true;
/* TODO: Should we restrict intersection feasibility
* based on min bandwidth of the intersected region also,
* say the intersected rule should have a min bandwidth
* of 20MHz?
*/
return false;
}
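/* Worked example (illustrative numbers, same units as the rules): rule1
* spanning 5170-5250 and rule2 spanning 5230-5330 intersect because
* start_freq2 (5230) lies inside [start_freq1, end_freq1), so the pair is
* later merged by ath11k_reg_intersect_rules() below.
*/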
static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2,
struct ieee80211_reg_rule *new_rule)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
u32 freq_diff, max_bw;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
start_freq2);
new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
freq_diff = new_rule->freq_range.end_freq_khz -
new_rule->freq_range.start_freq_khz;
max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
rule2->freq_range.max_bandwidth_khz);
new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
new_rule->power_rule.max_antenna_gain =
min_t(u32, rule1->power_rule.max_antenna_gain,
rule2->power_rule.max_antenna_gain);
new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
rule2->power_rule.max_eirp);
/* Use the flags of both the rules */
new_rule->flags = rule1->flags | rule2->flags;
/* To be safe, let's use the max CAC timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
}
static struct ieee80211_regdomain *
ath11k_regd_intersect(struct ieee80211_regdomain *default_regd,
struct ieee80211_regdomain *curr_regd)
{
u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
struct ieee80211_regdomain *new_regd = NULL;
u8 i, j, k;
num_old_regd_rules = default_regd->n_reg_rules;
num_curr_regd_rules = curr_regd->n_reg_rules;
num_new_regd_rules = 0;
/* Find the number of intersecting rules to allocate new regd memory */
for (i = 0; i < num_old_regd_rules; i++) {
old_rule = default_regd->reg_rules + i;
for (j = 0; j < num_curr_regd_rules; j++) {
curr_rule = curr_regd->reg_rules + j;
if (ath11k_reg_can_intersect(old_rule, curr_rule))
num_new_regd_rules++;
}
}
if (!num_new_regd_rules)
return NULL;
new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!new_regd)
return NULL;
/* We set the new country and dfs region directly and only trim
* the freq, power, antenna gain by intersecting with the
* default regdomain. Also, the max of the DFS CAC timeouts is selected.
*/
new_regd->n_reg_rules = num_new_regd_rules;
memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
new_regd->dfs_region = curr_regd->dfs_region;
new_rule = new_regd->reg_rules;
for (i = 0, k = 0; i < num_old_regd_rules; i++) {
old_rule = default_regd->reg_rules + i;
for (j = 0; j < num_curr_regd_rules; j++) {
curr_rule = curr_regd->reg_rules + j;
if (ath11k_reg_can_intersect(old_rule, curr_rule))
ath11k_reg_intersect_rules(old_rule, curr_rule,
(new_rule + k++));
}
}
return new_regd;
}
static const char *
ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
{
switch (dfs_region) {
case NL80211_DFS_FCC:
return "FCC";
case NL80211_DFS_ETSI:
return "ETSI";
case NL80211_DFS_JP:
return "JP";
default:
return "UNSET";
}
}
static u16
ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
{
u16 bw;
bw = end_freq - start_freq;
bw = min_t(u16, bw, max_bw);
if (bw >= 80 && bw < 160)
bw = 80;
else if (bw >= 40 && bw < 80)
bw = 40;
else if (bw < 40)
bw = 20;
return bw;
}
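/* Worked example: ath11k_reg_adjust_bw(5490, 5590, 160) computes
* bw = min(100, 160) = 100, which falls in [80, 160) and is therefore
* rounded down to 80 MHz.
*/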
static void
ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
reg_rule->flags = reg_flags;
}
static void
ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
struct ieee80211_regdomain *regd,
struct cur_reg_rule *reg_rule,
u8 *rule_idx, u32 flags, u16 max_bw)
{
u32 end_freq;
u16 bw;
u8 i;
i = *rule_idx;
bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
else
end_freq = reg_rule->end_freq;
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
max_bw);
i++;
ath11k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (end_freq == reg_rule->end_freq) {
regd->n_reg_rules--;
*rule_idx = i;
return;
}
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, max_bw);
i++;
ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
*rule_idx = i;
}
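/* Net effect: a DFS rule crossing the weather radar band is split into up
 * to three rules, and only the middle chunk carries the long CAC. Assuming
 * ETSI_WEATHER_RADAR_BAND_LOW/HIGH of 5590/5650 MHz and a 600000 ms CAC
 * (the usual ETSI EN 301 893 values; the defines are not visible in this
 * hunk), an incoming rule of 5470 - 5725 @ 160 becomes:
 *
 *	1. 5470 - 5590 @ 80	dfs_cac_ms = 0 (default 60 s)
 *	2. 5590 - 5650 @ 40	dfs_cac_ms = 600000
 *	3. 5650 - 5725 @ 40	dfs_cac_ms = 0
 *
 * When the incoming rule ends inside the band, the third rule is never
 * written and n_reg_rules is decremented to match.
 */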
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
struct cur_reg_rule *reg_rule;
u8 i = 0, j = 0;
u8 num_rules;
u16 max_bw;
u32 flags;
char alpha2[3];
num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
if (!num_rules)
goto ret;
/* Add max additional rules to accommodate weather radar band */
if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
num_rules += 2;
tmp_regd = kzalloc(sizeof(*tmp_regd) +
(num_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!tmp_regd)
goto ret;
tmp_regd->n_reg_rules = num_rules;
memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
reg_info->dfs_region, num_rules);
/* Update reg_rules[] below. Firmware is expected to
* send these rules in order (2G rules first and then 5G)
*/
for (; i < tmp_regd->n_reg_rules; i++) {
if (reg_info->num_2g_reg_rules &&
(i < reg_info->num_2g_reg_rules)) {
reg_rule = reg_info->reg_rules_2g_ptr + i;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2g);
flags = 0;
} else if (reg_info->num_5g_reg_rules &&
(j < reg_info->num_5g_reg_rules)) {
reg_rule = reg_info->reg_rules_5g_ptr + j++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_5g);
/* FW doesn't pass the NL80211_RRF_AUTO_BW flag for
* BW auto correction, so enable it by default for
* all 5G rules here. The regulatory core performs
* the BW correction if required and applies the
* flags in line with the other BW rule flags we
* pass from here.
flags = NL80211_RRF_AUTO_BW;
} else {
break;
}
flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
ath11k_reg_update_rule(tmp_regd->reg_rules + i,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
/* Update the DFS CAC timeout if the DFS domain is ETSI and
* the new rule covers the weather radar band.
* The default value of '0' corresponds to a 60 s timeout, so
* there is no need to update it for other rules.
*/
if (flags & NL80211_RRF_DFS &&
reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
(reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
ath11k_reg_update_weather_radar_band(ab, tmp_regd,
reg_rule, &i,
flags, max_bw);
continue;
}
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
flags);
}
if (intersect) {
default_regd = ab->default_regd[reg_info->phy_id];
/* Get a new regd by intersecting the received regd with
* our default regd.
*/
new_regd = ath11k_regd_intersect(default_regd, tmp_regd);
kfree(tmp_regd);
if (!new_regd) {
ath11k_warn(ab, "Unable to create intersected regdomain\n");
goto ret;
}
} else {
new_regd = tmp_regd;
}
ret:
return new_regd;
}
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
regd_update_work);
int ret;
ret = ath11k_regd_update(ar, false);
if (ret) {
/* Firmware has already moved to the new regd. We need
* to maintain channel consistency across FW, host driver
* and userspace. Hence as a fallback mechanism we can set
* the previous or default country code to the firmware.
*/
/* TODO: Implement Fallback Mechanism */
}
}
void ath11k_reg_init(struct ath11k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
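/* ath11k_reg_notifier() itself is outside this hunk; the sketch below only
 * illustrates the hook shape cfg80211 expects for a self-managed wiphy and
 * is not the driver's actual body.
 */
static void example_reg_notifier(struct wiphy *wiphy,
				 struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath11k *ar = hw->priv;

	/* With REGULATORY_WIPHY_SELF_MANAGED the regulatory core does not
	 * program channels on its own; the driver forwards the hint to
	 * firmware and later installs the regdomain it gets back (see
	 * ath11k_regd_update_work() above).
	 */
	ath11k_dbg(ar->ab, ATH11K_DBG_REG,
		   "reg notifier initiator %d alpha2 %c%c\n",
		   request->initiator, request->alpha2[0],
		   request->alpha2[1]);
}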
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
for (i = 0; i < MAX_RADIOS; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
}
}


@ -0,0 +1,35 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_REG_H
#define ATH11K_REG_H
#include <linux/kernel.h>
#include <net/regulatory.h>
struct ath11k_base;
struct ath11k;
/* DFS regdomains supported by firmware */
enum ath11k_dfs_region {
ATH11K_DFS_REG_UNSET,
ATH11K_DFS_REG_FCC,
ATH11K_DFS_REG_ETSI,
ATH11K_DFS_REG_MKK,
ATH11K_DFS_REG_CN,
ATH11K_DFS_REG_KR,
ATH11K_DFS_REG_UNDEF,
};
/* ATH11K Regulatory APIs */
void ath11k_reg_init(struct ath11k *ar);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
int ath11k_regd_update(struct ath11k *ar, bool init);
int ath11k_reg_update_chan_list(struct ath11k *ar);
#endif

File diff suppressed because it is too large


@ -0,0 +1,199 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "testmode.h"
#include <net/netlink.h>
#include "debug.h"
#include "wmi.h"
#include "hw.h"
#include "core.h"
#include "testmode_i.h"
static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
[ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH11K_TM_DATA_MAX_LEN },
[ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
/* Returns true if callee consumes the skb and the skb should be discarded.
* Returns false if skb is not used. Does not sleep.
*/
bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
{
struct sk_buff *nl_skb;
bool consumed;
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
spin_lock_bh(&ar->data_lock);
consumed = true;
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + skb->len,
GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ar->ab,
"failed to allocate skb for testmode wmi event\n");
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
if (ret) {
ath11k_warn(ar->ab,
"failed to to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to to put testmode wmi even cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data);
if (ret) {
ath11k_warn(ar->ab,
"failed to copy skb to testmode wmi event: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
out:
spin_unlock_bh(&ar->data_lock);
return consumed;
}
static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode cmd get version_major %d version_minor %d\n",
ATH11K_TESTMODE_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
ATH11K_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
}
return cfg80211_testmode_reply(skb);
}
static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct sk_buff *skb;
u32 cmd_id, buf_len;
int ret;
void *buf;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
if (!tb[ATH11K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, buf_len);
if (!skb) {
ret = -ENOMEM;
goto out;
}
memcpy(skb->data, buf, buf_len);
ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
dev_kfree_skb(skb);
ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
ret);
goto out;
}
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath11k *ar = hw->priv;
struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
int ret;
ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
NULL);
if (ret)
return ret;
if (!tb[ATH11K_TM_ATTR_CMD])
return -EINVAL;
switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
case ATH11K_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
case ATH11K_TM_CMD_WMI:
return ath11k_tm_cmd_wmi(ar, tb);
default:
return -EOPNOTSUPP;
}
}
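/* For context: mac80211 routes NL80211_CMD_TESTMODE to this handler through
 * the driver's ieee80211_ops. ath11k's mac.c (not in this hunk) wires it up
 * roughly as below, using the CFG80211_TESTMODE_CMD() helper so the member
 * compiles away when CONFIG_NL80211_TESTMODE is off.
 */
static const struct ieee80211_ops ath11k_ops_sketch = {
	/* ... the other mandatory ops are elided in this sketch ... */
	CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
};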


@ -0,0 +1,29 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "core.h"
#ifdef CONFIG_NL80211_TESTMODE
bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb);
int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
#else
static inline bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id,
struct sk_buff *skb)
{
return false;
}
static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void *data, int len)
{
return 0;
}
#endif


@ -0,0 +1,50 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
/* "API" level of the ath11k testmode interface. Bump it after every
* incompatible interface change.
*/
#define ATH11K_TESTMODE_VERSION_MAJOR 1
/* Bump this after every _compatible_ interface change, for example
* addition of a new command or an attribute.
*/
#define ATH11K_TESTMODE_VERSION_MINOR 0
#define ATH11K_TM_DATA_MAX_LEN 5000
enum ath11k_tm_attr {
__ATH11K_TM_ATTR_INVALID = 0,
ATH11K_TM_ATTR_CMD = 1,
ATH11K_TM_ATTR_DATA = 2,
ATH11K_TM_ATTR_WMI_CMDID = 3,
ATH11K_TM_ATTR_VERSION_MAJOR = 4,
ATH11K_TM_ATTR_VERSION_MINOR = 5,
ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
/* keep last */
__ATH11K_TM_ATTR_AFTER_LAST,
ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
};
/* All ath11k testmode interface commands specified in
* ATH11K_TM_ATTR_CMD
*/
enum ath11k_tm_cmd {
/* Returns the supported ath11k testmode interface version in
* ATH11K_TM_ATTR_VERSION_MAJOR and ATH11K_TM_ATTR_VERSION_MINOR.
* Always guaranteed to work. User space uses this to verify
* it's using the correct version of the testmode interface.
*/
ATH11K_TM_CMD_GET_VERSION = 0,
/* The command used to transmit a WMI command to the firmware and
* the event used to receive WMI events from the firmware. The
* payload carries only the WMI message, without the struct
* wmi_cmd_hdr header. The command id is provided with
* ATH11K_TM_ATTR_WMI_CMDID and the payload in ATH11K_TM_ATTR_DATA.
*/
ATH11K_TM_CMD_WMI = 1,
};
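/* A minimal userspace sketch (assuming libnl-3; error handling, cleanup and
 * the reply path are omitted) showing how these attributes are nested inside
 * NL80211_ATTR_TESTDATA to issue ATH11K_TM_CMD_GET_VERSION:
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int ath11k_tm_get_version(int ifindex)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	/* Driver-private attributes live inside NL80211_ATTR_TESTDATA. */
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_GET_VERSION);
	nla_nest_end(msg, td);

	return nl_send_auto(sk, msg);
}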


@ -0,0 +1,9 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"


@ -0,0 +1,113 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#include <linux/tracepoint.h>
#include "core.h"
#define _TRACE_H_
/* create empty functions when tracing is disabled */
#if !defined(CONFIG_ATH11K_TRACING)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_ATH11K_TRACING */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath11k
TRACE_EVENT(ath11k_htt_pktlog,
TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len),
TP_ARGS(ar, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, buf_len)
__dynamic_array(u8, pktlog, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
),
TP_printk(
"%s %s size %hu",
__get_str(driver),
__get_str(device),
__entry->buf_len
)
);
TRACE_EVENT(ath11k_htt_ppdu_stats,
TP_PROTO(struct ath11k *ar, const void *data, size_t len),
TP_ARGS(ar, data, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, len)
__dynamic_array(u8, ppdu, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->len = len;
memcpy(__get_dynamic_array(ppdu), data, len);
),
TP_printk(
"%s %s ppdu len %d",
__get_str(driver),
__get_str(device),
__entry->len
)
);
TRACE_EVENT(ath11k_htt_rxdesc,
TP_PROTO(struct ath11k *ar, const void *data, size_t len),
TP_ARGS(ar, data, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->len = len;
memcpy(__get_dynamic_array(rxdesc), data, len);
),
TP_printk(
"%s %s rxdesc len %d",
__get_str(driver),
__get_str(device),
__entry->len
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
/* we don't want to use include/trace/events */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
/* This part must be outside protection */
#include <trace/define_trace.h>
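/* Usage sketch: each TRACE_EVENT() above generates an inline trace_<name>()
 * helper that the driver calls from its datapath, e.g.
 *
 *	trace_ath11k_htt_pktlog(ar, skb->data, skb->len);
 *
 * With CONFIG_ATH11K_TRACING=y the events can then be enabled at runtime
 * through tracefs:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/ath11k/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */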

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -666,14 +666,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
- * 0x8000 set, this is not a supported regulatory domain
- * but since we have more than one user with it we need
- * a solution for them. We default to 0x64, which is the
- * default Atheros world regulatory domain.
+ * 0x8000 or 0x0 set, this is not a supported regulatory
+ * domain but since we have more than one user with it we
+ * need a solution for them. We default to 0x64, which is
+ * the default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
-if (reg->current_rd != COUNTRY_ERD_FLAG)
+if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;


@ -869,6 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
u8 data_offset;
struct wil_rx_status_extended *s;
u16 sring_idx = sring - wil->srings;
+int invalid_buff_id_retry;
BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
@ -882,9 +883,9 @@ again:
/* Extract the buffer ID from the status message */
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+invalid_buff_id_retry = 0;
while (!buff_id) {
struct wil_rx_status_extended *s;
-int invalid_buff_id_retry = 0;
wil_dbg_txrx(wil,
"buff_id is not updated yet by HW, (swhead 0x%x)\n",