Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for v5.9. Major changes:

ath11k

* add 6G band support

* add spectral scan support
Kalle Valo 2020-06-15 20:18:39 +03:00
commit f5f58a0b1e
25 changed files with 2844 additions and 125 deletions

View File

@ -15,11 +15,11 @@ config WLAN_VENDOR_ATH
For more information and documentation on this module you can visit:
http://wireless.kernel.org/en/users/Drivers/ath
https://wireless.wiki.kernel.org/en/users/Drivers/ath
For information on all Atheros wireless drivers visit:
http://wireless.kernel.org/en/users/Drivers/Atheros
https://wireless.wiki.kernel.org/en/users/Drivers/Atheros
if WLAN_VENDOR_ATH

View File

@ -1591,7 +1591,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err:
return res;
}
@ -1798,7 +1800,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err:
return res;
}

View File

@ -34,3 +34,12 @@ config ATH11K_TRACING
depends on ATH11K && EVENT_TRACING
help
Select this to use ath11k tracing infrastructure.
config ATH11K_SPECTRAL
bool "QCA ath11k spectral scan support"
depends on ATH11K_DEBUGFS
depends on RELAY
help
Enable ath11k spectral scan support
Say Y to enable access to the FFT/spectral data via debugfs.
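
The help text above refers to FFT samples exported through a relay channel under debugfs. As a purely hypothetical illustration (the exact debugfs path is an assumption, not something this hunk defines), a userspace consumer could block-read the relay file and stream the raw sample buffers out:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        /* Path is assumed for illustration; the driver creates the relay
         * file somewhere under its per-device debugfs directory. */
        int fd = open("/sys/kernel/debug/ath11k/<hw>/spectral_scan0", O_RDONLY);

        if (fd < 0) {
                perror("open spectral relay file");
                return 1;
        }
        /* Each read() returns raw spectral FFT sample buffers as the
         * driver pushes them through the relay channel. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}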

View File

@ -15,12 +15,14 @@ ath11k-y += core.o \
dp_rx.o \
debug.o \
ce.o \
peer.o
peer.o \
dbring.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)

View File

@ -400,8 +400,16 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
goto err_dp_pdev_free;
}
ret = ath11k_spectral_init(ab);
if (ret) {
ath11k_err(ab, "failed to init spectral %d\n", ret);
goto err_thermal_unregister;
}
return 0;
err_thermal_unregister:
ath11k_thermal_unregister(ab);
err_dp_pdev_free:
ath11k_dp_pdev_free(ab);
err_mac_unregister:
@ -414,6 +422,7 @@ err_pdev_debug:
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
ath11k_hif_irq_disable(ab);
@ -582,6 +591,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
ath11k_thermal_unregister(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_spectral_deinit(ab);
ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);

View File

@ -21,6 +21,8 @@
#include "hal_rx.h"
#include "reg.h"
#include "thermal.h"
#include "dbring.h"
#include "spectral.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@ -215,12 +217,15 @@ struct ath11k_vif {
bool is_started;
bool is_up;
bool spectral_enabled;
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
bool rsnie_present;
bool wpaie_present;
};
struct ath11k_vif_iter {
@ -353,7 +358,10 @@ struct ath11k_sta {
#endif
};
#define ATH11K_NUM_CHANS 41
#define ATH11K_MIN_5G_FREQ 4150
#define ATH11K_MIN_6G_FREQ 5945
#define ATH11K_MAX_6G_FREQ 7115
#define ATH11K_NUM_CHANS 100
#define ATH11K_MAX_5G_CHAN 173
enum ath11k_state {
@ -431,6 +439,7 @@ struct ath11k {
u32 vht_cap_info;
struct ath11k_he ar_he;
enum ath11k_state state;
bool supports_6ghz;
struct {
struct completion started;
struct completion completed;
@ -536,6 +545,9 @@ struct ath11k {
u32 cached_ppdu_id;
#ifdef CONFIG_ATH11K_DEBUGFS
struct ath11k_debug debug;
#endif
#ifdef CONFIG_ATH11K_SPECTRAL
struct ath11k_spectral spectral;
#endif
bool dfs_block_radar_events;
struct ath11k_thermal thermal;
@ -548,6 +560,7 @@ struct ath11k_band_cap {
u32 he_mcs;
u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
struct ath11k_ppe_threshold he_ppet;
u16 he_6ghz_capa;
};
struct ath11k_pdev_cap {
@ -579,12 +592,42 @@ struct ath11k_board_data {
/* IPQ8074 HW channel counters frequency value in hertz */
#define IPQ8074_CC_FREQ_HERTZ 320000
struct ath11k_soc_dp_rx_stats {
struct ath11k_bp_stats {
/* Head Pointer reported by the last HTT Backpressure event for the ring */
u16 hp;
/* Tail Pointer reported by the last HTT Backpressure event for the ring */
u16 tp;
/* Number of Backpressure events received for the ring */
u32 count;
/* Last recorded event timestamp */
unsigned long jiffies;
};
struct ath11k_dp_ring_bp_stats {
struct ath11k_bp_stats umac_ring_bp_stats[HTT_SW_UMAC_RING_IDX_MAX];
struct ath11k_bp_stats lmac_ring_bp_stats[HTT_SW_LMAC_RING_IDX_MAX][MAX_RADIOS];
};
struct ath11k_soc_dp_tx_err_stats {
/* TCL Ring Descriptor unavailable */
u32 desc_na[DP_TCL_NUM_RING_MAX];
/* Other failures during dp_tx due to mem allocation failure
* idr unavailable etc.
*/
atomic_t misc_fail;
};
struct ath11k_soc_dp_stats {
u32 err_ring_pkts;
u32 invalid_rbm;
u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
u32 hal_reo_error[DP_REO_DST_RING_MAX];
struct ath11k_soc_dp_tx_err_stats tx_err;
struct ath11k_dp_ring_bp_stats bp_stats;
};
/* Master structure to hold the hw data which may be used in core module */
@ -653,7 +696,7 @@ struct ath11k_base {
struct dentry *debugfs_soc;
struct dentry *debugfs_ath11k;
#endif
struct ath11k_soc_dp_rx_stats soc_stats;
struct ath11k_soc_dp_stats soc_stats;
unsigned long dev_flags;
struct completion driver_recovery;
@ -668,6 +711,9 @@ struct ath11k_base {
/* Round robbin based TCL ring selector */
atomic_t tcl_ring_selector;
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
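
The trailing drv_priv[] member keeps bus/HIF private data co-allocated right behind the core structure. A minimal sketch of that pattern (the helper name and the way the private area is used are illustrative, not taken from this diff):

/* Illustration only: co-allocate caller-private data behind ath11k_base. */
static struct ath11k_base *example_core_alloc(struct device *dev,
                                              size_t priv_size)
{
        struct ath11k_base *ab;

        /* drv_priv[] is the last member and __aligned(sizeof(void *)),
         * so the extra priv_size bytes start right after the struct on
         * a pointer-aligned boundary. A bus layer can then treat
         * ab->drv_priv as its own context area. */
        ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
        if (!ab)
                return NULL;

        ab->dev = dev;
        return ab;
}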

View File

@ -0,0 +1,356 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "debug.h"
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_element *buff,
gfp_t gfp)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
dma_addr_t paddr;
void *ptr_aligned, *ptr_unaligned, *desc;
int ret;
int buf_id;
u32 cookie;
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
lockdep_assert_held(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
ptr_unaligned = buff->payload;
ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
DMA_FROM_DEVICE);
ret = dma_mapping_error(ab->dev, paddr);
if (ret)
goto err;
spin_lock_bh(&ring->idr_lock);
buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
spin_unlock_bh(&ring->idr_lock);
if (buf_id < 0) {
ret = -ENOBUFS;
goto err_dma_unmap;
}
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOENT;
goto err_idr_remove;
}
buff->paddr = paddr;
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
ath11k_hal_srng_access_end(ab, srng);
return 0;
err_idr_remove:
spin_lock_bh(&ring->idr_lock);
idr_remove(&ring->bufs_idr, buf_id);
spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
dma_unmap_single(ab->dev, paddr, ring->buf_sz,
DMA_FROM_DEVICE);
err:
ath11k_hal_srng_access_end(ab, srng);
return ret;
}
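
The cookie written into the descriptor above is what later ties a completed buffer back to its pdev and IDR slot. A small round-trip sketch using the same field macros (the values are arbitrary, this is not driver code):

static void example_cookie_roundtrip(void)
{
        /* Arbitrary values; only the FIELD_PREP/FIELD_GET pairing matters. */
        u32 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, 1) |
                     FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, 42);
        u8 pdev_idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); /* 1 */
        int buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);   /* 42 */

        (void)pdev_idx;
        (void)buf_id;
}
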
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
struct ath11k_dbring *ring,
gfp_t gfp)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
int num_remain, req_entries, num_free;
u32 align;
int size, ret;
srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];
spin_lock_bh(&srng->lock);
num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
req_entries = min(num_free, ring->bufs_max);
num_remain = req_entries;
align = ring->buf_align;
size = sizeof(*buff) + ring->buf_sz + align - 1;
while (num_remain > 0) {
buff = kzalloc(size, gfp);
if (!buff)
break;
ret = ath11k_dbring_bufs_replenish(ar, ring, buff, gfp);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
kfree(buff);
break;
}
num_remain--;
}
spin_unlock_bh(&srng->lock);
return num_remain;
}
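
The element size computed above includes align - 1 bytes of slack so that PTR_ALIGN() in the replenish path can always carve an aligned window of buf_sz bytes out of payload[]. A worked sketch with made-up sizes:

/* Illustration only, with invented sizes: buf_sz = 2048, buf_align = 128. */
static int example_aligned_element(void)
{
        struct ath11k_dbring_element *buff;
        void *aligned;

        buff = kzalloc(sizeof(*buff) + 2048 + 128 - 1, GFP_KERNEL);
        if (!buff)
                return -ENOMEM;

        /* PTR_ALIGN() advances payload by at most 127 bytes, and the
         * 127 bytes of slack guarantee that aligned + 2048 still lies
         * inside the allocation, so the whole DMA-mapped window is
         * backed by valid memory. */
        aligned = PTR_ALIGN(buff->payload, 128);
        (void)aligned;

        kfree(buff);
        return 0;
}
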
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id)
{
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
int ret;
if (id >= WMI_DIRECT_BUF_MAX)
return -EINVAL;
param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
param.module_id = id;
param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
param.num_elems = ring->bufs_max;
param.buf_size = ring->buf_sz;
param.num_resp_per_event = ring->num_resp_per_event;
param.event_timeout_ms = ring->event_timeout_ms;
ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
return ret;
}
return 0;
}
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
u32 num_resp_per_event, u32 event_timeout_ms,
int (*handler)(struct ath11k *,
struct ath11k_dbring_data *))
{
if (WARN_ON(!ring))
return -EINVAL;
ring->num_resp_per_event = num_resp_per_event;
ring->event_timeout_ms = event_timeout_ms;
ring->handler = handler;
return 0;
}
int ath11k_dbring_buf_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_cap *db_cap)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
int ret;
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
ring->bufs_max = ring->refill_srng.size /
ath11k_hal_srng_get_entrysize(HAL_RXDMA_DIR_BUF);
ring->buf_sz = db_cap->min_buf_sz;
ring->buf_align = db_cap->min_buf_align;
ring->pdev_id = db_cap->pdev_id;
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
ret = ath11k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
return ret;
}
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
int ring_num, int num_entries)
{
int ret;
ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
ring_num, ar->pdev_idx, num_entries);
if (ret < 0) {
ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
ret, ring_num);
goto err;
}
return 0;
err:
ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
return ret;
}
int ath11k_dbring_get_cap(struct ath11k_base *ab,
u8 pdev_idx,
enum wmi_direct_buffer_module id,
struct ath11k_dbring_cap *db_cap)
{
int i;
if (!ab->num_db_cap || !ab->db_caps)
return -ENOENT;
if (id >= WMI_DIRECT_BUF_MAX)
return -EINVAL;
for (i = 0; i < ab->num_db_cap; i++) {
if (pdev_idx == ab->db_caps[i].pdev_id &&
id == ab->db_caps[i].id) {
*db_cap = ab->db_caps[i];
return 0;
}
}
return -ENOENT;
}
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_dbring_buf_release_event *ev)
{
struct ath11k_dbring *ring;
struct hal_srng *srng;
struct ath11k *ar;
struct ath11k_dbring_element *buff;
struct ath11k_dbring_data handler_data;
struct ath11k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
u8 pdev_idx, rbm;
u32 cookie;
int buf_id;
int size;
dma_addr_t paddr;
int ret = 0;
pdev_idx = ev->fixed.pdev_id;
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
return -EINVAL;
}
if (ev->fixed.num_buf_release_entry !=
ev->fixed.num_meta_data_entry) {
ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
ev->fixed.num_buf_release_entry,
ev->fixed.num_meta_data_entry);
return -EINVAL;
}
ar = ab->pdevs[pdev_idx].ar;
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
ret = -EINVAL;
goto rcu_unlock;
}
switch (ev->fixed.module_id) {
case WMI_DIRECT_BUF_SPECTRAL:
ring = ath11k_spectral_get_dbring(ar);
break;
default:
ring = NULL;
ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
ev->fixed.module_id);
break;
}
if (!ring) {
ret = -EINVAL;
goto rcu_unlock;
}
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
num_entry = ev->fixed.num_buf_release_entry;
size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
num_buff_reaped = 0;
spin_lock_bh(&srng->lock);
while (num_buff_reaped < num_entry) {
desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
handler_data.meta = ev->meta_data[num_buff_reaped];
num_buff_reaped++;
ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
spin_lock_bh(&ring->idr_lock);
buff = idr_find(&ring->bufs_idr, buf_id);
if (!buff) {
spin_unlock_bh(&ring->idr_lock);
continue;
}
idr_remove(&ring->bufs_idr, buf_id);
spin_unlock_bh(&ring->idr_lock);
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
ring->buf_align);
handler_data.data_sz = ring->buf_sz;
ring->handler(ar, &handler_data);
}
memset(buff, 0, size);
ath11k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
}
spin_unlock_bh(&srng->lock);
rcu_unlock:
rcu_read_unlock();
return ret;
}
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
struct ath11k_dbring_element *buff;
int buf_id;
spin_lock_bh(&ring->idr_lock);
idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
idr_remove(&ring->bufs_idr, buf_id);
dma_unmap_single(ar->ab->dev, buff->paddr,
ring->buf_sz, DMA_FROM_DEVICE);
kfree(buff);
}
idr_destroy(&ring->bufs_idr);
spin_unlock_bh(&ring->idr_lock);
}

View File

@ -0,0 +1,79 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_DBRING_H
#define ATH11K_DBRING_H
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include "dp.h"
struct ath11k_dbring_element {
dma_addr_t paddr;
u8 payload[0];
};
struct ath11k_dbring_data {
void *data;
u32 data_sz;
struct wmi_dma_buf_release_meta_data meta;
};
struct ath11k_dbring_buf_release_event {
struct ath11k_wmi_dma_buf_release_fixed_param fixed;
struct wmi_dma_buf_release_entry *buf_entry;
struct wmi_dma_buf_release_meta_data *meta_data;
u32 num_buf_entry;
u32 num_meta;
};
struct ath11k_dbring_cap {
u32 pdev_id;
enum wmi_direct_buffer_module id;
u32 min_elem;
u32 min_buf_sz;
u32 min_buf_align;
};
struct ath11k_dbring {
struct dp_srng refill_srng;
struct idr bufs_idr;
/* Protects bufs_idr */
spinlock_t idr_lock;
dma_addr_t tp_addr;
dma_addr_t hp_addr;
int bufs_max;
u32 pdev_id;
u32 buf_sz;
u32 buf_align;
u32 num_resp_per_event;
u32 event_timeout_ms;
int (*handler)(struct ath11k *, struct ath11k_dbring_data *);
};
int ath11k_dbring_set_cfg(struct ath11k *ar,
struct ath11k_dbring *ring,
u32 num_resp_per_event,
u32 event_timeout_ms,
int (*handler)(struct ath11k *,
struct ath11k_dbring_data *));
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id);
int ath11k_dbring_buf_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_cap *db_cap);
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
int ring_num, int num_entries);
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_dbring_buf_release_event *ev);
int ath11k_dbring_get_cap(struct ath11k_base *ab,
u8 pdev_idx,
enum wmi_direct_buffer_module id,
struct ath11k_dbring_cap *db_cap);
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
#endif /* ATH11K_DBRING_H */
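
Put together, the header describes a small direct-buffer ring API. One plausible setup sequence for a client such as the spectral module (ring number, sizing and error handling below are illustrative; the real spectral code may differ) looks like this:

/* Hypothetical client setup; the parameter values are placeholders. */
static int example_dbring_client_setup(struct ath11k *ar,
                                       struct ath11k_dbring *ring,
                                       int (*handler)(struct ath11k *,
                                                      struct ath11k_dbring_data *))
{
        struct ath11k_dbring_cap db_cap;
        int ret;

        /* Capabilities were parsed from the WMI service ready ext TLVs. */
        ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
                                    WMI_DIRECT_BUF_SPECTRAL, &db_cap);
        if (ret)
                return ret;

        ret = ath11k_dbring_srng_setup(ar, ring, 0 /* ring_num */,
                                       db_cap.min_elem);
        if (ret)
                return ret;

        ret = ath11k_dbring_buf_setup(ar, ring, &db_cap);
        if (ret)
                goto srng_cleanup;

        ret = ath11k_dbring_set_cfg(ar, ring, 1 /* num_resp_per_event */,
                                    0 /* event_timeout_ms */, handler);
        if (ret)
                goto buf_cleanup;

        /* Tell firmware where the refill ring lives. */
        ret = ath11k_dbring_wmi_cfg_setup(ar, ring, WMI_DIRECT_BUF_SPECTRAL);
        if (ret)
                goto buf_cleanup;

        return 0;

buf_cleanup:
        ath11k_dbring_buf_cleanup(ar, ring);
srng_cleanup:
        ath11k_dbring_srng_cleanup(ar, ring);
        return ret;
}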

View File

@ -12,6 +12,43 @@
#include "debug_htt_stats.h"
#include "peer.h"
static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
"REO2SW1_RING",
"REO2SW2_RING",
"REO2SW3_RING",
"REO2SW4_RING",
"WBM2REO_LINK_RING",
"REO2TCL_RING",
"REO2FW_RING",
"RELEASE_RING",
"PPE_RELEASE_RING",
"TCL2TQM_RING",
"TQM_RELEASE_RING",
"REO_RELEASE_RING",
"WBM2SW0_RELEASE_RING",
"WBM2SW1_RELEASE_RING",
"WBM2SW2_RELEASE_RING",
"WBM2SW3_RELEASE_RING",
"REO_CMD_RING",
"REO_STATUS_RING",
};
static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
"FW2RXDMA_BUF_RING",
"FW2RXDMA_STATUS_RING",
"FW2RXDMA_LINK_RING",
"SW2RXDMA_BUF_RING",
"WBM2RXDMA_LINK_RING",
"RXDMA2FW_RING",
"RXDMA2SW_RING",
"RXDMA2RELEASE_RING",
"RXDMA2REO_RING",
"MONITOR_STATUS_RING",
"MONITOR_BUF_RING",
"MONITOR_DESC_RING",
"MONITOR_DEST_RING",
};
void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
{
struct va_format vaf = {
@ -739,12 +776,78 @@ static const struct file_operations fops_extd_rx_stats = {
.open = simple_open,
};
static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
static int ath11k_fill_bp_stats(struct ath11k_base *ab,
struct ath11k_bp_stats *bp_stats,
char *buf, int len, int size)
{
lockdep_assert_held(&ab->base_lock);
len += scnprintf(buf + len, size - len, "count: %u\n",
bp_stats->count);
len += scnprintf(buf + len, size - len, "hp: %u\n",
bp_stats->hp);
len += scnprintf(buf + len, size - len, "tp: %u\n",
bp_stats->tp);
len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
jiffies_to_msecs(jiffies - bp_stats->jiffies));
return len;
}
static ssize_t ath11k_debug_dump_soc_ring_bp_stats(struct ath11k_base *ab,
char *buf, int size)
{
struct ath11k_bp_stats *bp_stats;
bool stats_rxd = false;
u8 i, pdev_idx;
int len = 0;
len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
len += scnprintf(buf + len, size - len, "==================\n");
spin_lock_bh(&ab->base_lock);
for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
if (!bp_stats->count)
continue;
len += scnprintf(buf + len, size - len, "Ring: %s\n",
htt_bp_umac_ring[i]);
len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
stats_rxd = true;
}
for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
bp_stats =
&ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
if (!bp_stats->count)
continue;
len += scnprintf(buf + len, size - len, "Ring: %s\n",
htt_bp_lmac_ring[i]);
len += scnprintf(buf + len, size - len, "pdev: %d\n",
pdev_idx);
len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
stats_rxd = true;
}
}
spin_unlock_bh(&ab->base_lock);
if (!stats_rxd)
len += scnprintf(buf + len, size - len,
"No Ring Backpressure stats received\n\n");
return len;
}
static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k_base *ab = file->private_data;
struct ath11k_soc_dp_rx_stats *soc_stats = &ab->soc_stats;
struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
int len = 0, i, retval;
const int size = 4096;
static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
@ -788,6 +891,19 @@ static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
soc_stats->hal_reo_error[2],
soc_stats->hal_reo_error[3]);
len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
len += scnprintf(buf + len, size - len, "ring%d: %u\n",
i, soc_stats->tx_err.desc_na[i]);
len += scnprintf(buf + len, size - len,
"\nMisc Transmit Failures: %d\n",
atomic_read(&soc_stats->tx_err.misc_fail));
len += ath11k_debug_dump_soc_ring_bp_stats(ab, buf + len, size - len);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@ -796,8 +912,8 @@ static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
return retval;
}
static const struct file_operations fops_soc_rx_stats = {
.read = ath11k_debug_dump_soc_rx_stats,
static const struct file_operations fops_soc_dp_stats = {
.read = ath11k_debug_dump_soc_dp_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@ -819,8 +935,8 @@ int ath11k_debug_pdev_create(struct ath11k_base *ab)
debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
debugfs_create_file("soc_rx_stats", 0600, ab->debugfs_soc, ab,
&fops_soc_rx_stats);
debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
&fops_soc_dp_stats);
return 0;
}

View File

@ -172,11 +172,12 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
case HAL_RXDMA_DST:
case HAL_RXDMA_MONITOR_DST:
case HAL_RXDMA_MONITOR_DESC:
case HAL_RXDMA_DIR_BUF:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
break;
case HAL_RXDMA_DIR_BUF:
break;
default:
ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
return -EINVAL;

View File

@ -999,6 +999,48 @@ struct htt_resp_msg {
#define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
#define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
#define HTT_BACKPRESSURE_UMAC_RING_TYPE 0
#define HTT_BACKPRESSURE_LMAC_RING_TYPE 1
enum htt_backpressure_umac_ringid {
HTT_SW_RING_IDX_REO_REO2SW1_RING,
HTT_SW_RING_IDX_REO_REO2SW2_RING,
HTT_SW_RING_IDX_REO_REO2SW3_RING,
HTT_SW_RING_IDX_REO_REO2SW4_RING,
HTT_SW_RING_IDX_REO_WBM2REO_LINK_RING,
HTT_SW_RING_IDX_REO_REO2TCL_RING,
HTT_SW_RING_IDX_REO_REO2FW_RING,
HTT_SW_RING_IDX_REO_REO_RELEASE_RING,
HTT_SW_RING_IDX_WBM_PPE_RELEASE_RING,
HTT_SW_RING_IDX_TCL_TCL2TQM_RING,
HTT_SW_RING_IDX_WBM_TQM_RELEASE_RING,
HTT_SW_RING_IDX_WBM_REO_RELEASE_RING,
HTT_SW_RING_IDX_WBM_WBM2SW0_RELEASE_RING,
HTT_SW_RING_IDX_WBM_WBM2SW1_RELEASE_RING,
HTT_SW_RING_IDX_WBM_WBM2SW2_RELEASE_RING,
HTT_SW_RING_IDX_WBM_WBM2SW3_RELEASE_RING,
HTT_SW_RING_IDX_REO_REO_CMD_RING,
HTT_SW_RING_IDX_REO_REO_STATUS_RING,
HTT_SW_UMAC_RING_IDX_MAX,
};
enum htt_backpressure_lmac_ringid {
HTT_SW_RING_IDX_FW2RXDMA_BUF_RING,
HTT_SW_RING_IDX_FW2RXDMA_STATUS_RING,
HTT_SW_RING_IDX_FW2RXDMA_LINK_RING,
HTT_SW_RING_IDX_SW2RXDMA_BUF_RING,
HTT_SW_RING_IDX_WBM2RXDMA_LINK_RING,
HTT_SW_RING_IDX_RXDMA2FW_RING,
HTT_SW_RING_IDX_RXDMA2SW_RING,
HTT_SW_RING_IDX_RXDMA2RELEASE_RING,
HTT_SW_RING_IDX_RXDMA2REO_RING,
HTT_SW_RING_IDX_MONITOR_STATUS_RING,
HTT_SW_RING_IDX_MONITOR_BUF_RING,
HTT_SW_RING_IDX_MONITOR_DESC_RING,
HTT_SW_RING_IDX_MONITOR_DEST_RING,
HTT_SW_LMAC_RING_IDX_MAX,
};
/* ppdu stats
*
* @details

View File

@ -653,10 +653,8 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
spin_unlock_bh(&dp->reo_cmd_lock);
/* Flush and invalidate aged REO desc from HW cache */
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) {
if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
@ -1503,9 +1501,10 @@ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
u32 *data = (u32 *)skb->data;
u8 pdev_id, ring_type, ring_id;
u8 pdev_id, ring_type, ring_id, pdev_idx;
u16 hp, tp;
u32 backpressure_time;
struct ath11k_bp_stats *bp_stats;
pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
@ -1520,6 +1519,31 @@ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
return;
bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
pdev_idx = DP_HW2SW_MACID(pdev_id);
if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
return;
bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
} else {
ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
ring_type);
return;
}
spin_lock_bh(&ab->base_lock);
bp_stats->hp = hp;
bp_stats->tp = tp;
bp_stats->count++;
bp_stats->jiffies = jiffies;
spin_unlock_bh(&ab->base_lock);
}
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
@ -2162,6 +2186,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
u8 channel_num;
u32 center_freq;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@ -2172,8 +2197,11 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16;
if (channel_num >= 1 && channel_num <= 14) {
if (center_freq >= 5935 && center_freq <= 7105) {
rx_status->band = NL80211_BAND_6GHZ;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;

View File

@ -121,8 +121,10 @@ tcl_ring_sel:
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (ret < 0) {
if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1))
if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
}
/* Check if the next ring is available */
ring_selector++;
@ -180,11 +182,13 @@ tcl_ring_sel:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
goto fail_remove_idr;
}
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_idr;
@ -208,6 +212,7 @@ tcl_ring_sel:
* desc because the desc is directly enqueued onto hw queue.
*/
ath11k_hal_srng_access_end(ab, tcl_ring);
ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;

View File

@ -33,6 +33,15 @@
.max_power = 30, \
}
#define CHAN6G(_channel, _freq, _flags) { \
.band = NL80211_BAND_6GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
@ -86,6 +95,68 @@ static const struct ieee80211_channel ath11k_5ghz_channels[] = {
CHAN5G(173, 5865, 0),
};
static const struct ieee80211_channel ath11k_6ghz_channels[] = {
CHAN6G(1, 5955, 0),
CHAN6G(5, 5975, 0),
CHAN6G(9, 5995, 0),
CHAN6G(13, 6015, 0),
CHAN6G(17, 6035, 0),
CHAN6G(21, 6055, 0),
CHAN6G(25, 6075, 0),
CHAN6G(29, 6095, 0),
CHAN6G(33, 6115, 0),
CHAN6G(37, 6135, 0),
CHAN6G(41, 6155, 0),
CHAN6G(45, 6175, 0),
CHAN6G(49, 6195, 0),
CHAN6G(53, 6215, 0),
CHAN6G(57, 6235, 0),
CHAN6G(61, 6255, 0),
CHAN6G(65, 6275, 0),
CHAN6G(69, 6295, 0),
CHAN6G(73, 6315, 0),
CHAN6G(77, 6335, 0),
CHAN6G(81, 6355, 0),
CHAN6G(85, 6375, 0),
CHAN6G(89, 6395, 0),
CHAN6G(93, 6415, 0),
CHAN6G(97, 6435, 0),
CHAN6G(101, 6455, 0),
CHAN6G(105, 6475, 0),
CHAN6G(109, 6495, 0),
CHAN6G(113, 6515, 0),
CHAN6G(117, 6535, 0),
CHAN6G(121, 6555, 0),
CHAN6G(125, 6575, 0),
CHAN6G(129, 6595, 0),
CHAN6G(133, 6615, 0),
CHAN6G(137, 6635, 0),
CHAN6G(141, 6655, 0),
CHAN6G(145, 6675, 0),
CHAN6G(149, 6695, 0),
CHAN6G(153, 6715, 0),
CHAN6G(157, 6735, 0),
CHAN6G(161, 6755, 0),
CHAN6G(165, 6775, 0),
CHAN6G(169, 6795, 0),
CHAN6G(173, 6815, 0),
CHAN6G(177, 6835, 0),
CHAN6G(181, 6855, 0),
CHAN6G(185, 6875, 0),
CHAN6G(189, 6895, 0),
CHAN6G(193, 6915, 0),
CHAN6G(197, 6935, 0),
CHAN6G(201, 6955, 0),
CHAN6G(205, 6975, 0),
CHAN6G(209, 6995, 0),
CHAN6G(213, 7015, 0),
CHAN6G(217, 7035, 0),
CHAN6G(221, 7055, 0),
CHAN6G(225, 7075, 0),
CHAN6G(229, 7095, 0),
CHAN6G(233, 7115, 0),
};
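
Every entry above follows the standard 6 GHz channelization, center frequency = 5950 MHz + 5 MHz * channel number. A trivial helper stating that relation (illustrative, not part of the driver):

/* Illustration only: the relation the CHAN6G() table above satisfies,
 * e.g. channel 1 -> 5955 MHz, channel 233 -> 7115 MHz. */
static inline u32 example_6ghz_chan_to_freq(u32 chan)
{
        return 5950 + 5 * chan;
}
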
static struct ieee80211_rate ath11k_legacy_rates[] = {
{ .bitrate = 10,
.hw_value = ATH11K_HW_RATE_CCK_LP_1M },
@ -134,6 +205,17 @@ ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
[NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
[NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
},
[NL80211_BAND_6GHZ] = {
[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
[NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
[NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
[NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
[NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
[NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
},
};
const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
@ -698,6 +780,8 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
struct ieee80211_mgmt *mgmt;
u8 *ies;
int ret;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
@ -709,6 +793,17 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
return -EPERM;
}
ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
ies += sizeof(mgmt->u.beacon);
if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
arvif->rsnie_present = true;
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
kfree_skb(bcn);
@ -798,6 +893,7 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
struct ieee80211_bss_conf *info = &vif->bss_conf;
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ath11k_vif *arvif = (struct ath11k_vif *)vif->drv_priv;
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
@ -808,7 +904,12 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
if (arvif->rsnie_present || arvif->wpaie_present) {
arg->need_ptk_4_way = true;
if (arvif->wpaie_present)
arg->need_gtk_2_way = true;
} else if (bss) {
const struct cfg80211_bss_ies *ies;
rcu_read_lock();
@ -1489,6 +1590,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
}
break;
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check HE first */
if (sta->he_cap.has_he) {
phymode = ath11k_mac_get_phymode_he(ar, sta);
@ -2125,6 +2227,9 @@ static int ath11k_start_scan(struct ath11k *ar,
lockdep_assert_held(&ar->conf_mutex);
if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND)
ath11k_spectral_reset_buffer(ar);
ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
if (ret)
return ret;
@ -3411,7 +3516,7 @@ static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
rate_cap_rx_chainmask);
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
if (ht_cap_info)
@ -3532,6 +3637,35 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
he_cap_elem->phy_cap_info[9] &= ~m;
}
static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap,
struct ath11k_band_cap *bcap)
{
u8 val;
bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
WLAN_HT_CAP_SM_PS_DYNAMIC);
else
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
WLAN_HT_CAP_SM_PS_DISABLED);
val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
pcap->vht_cap);
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val);
val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap);
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val);
if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
return cpu_to_le16(bcap->he_6ghz_capa);
}
static int ath11k_mac_copy_he_cap(struct ath11k *ar,
struct ath11k_pdev_cap *cap,
struct ieee80211_sband_iftype_data *data,
@ -3614,6 +3748,11 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar,
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
ath11k_gen_ppe_thresh(&band_cap->he_ppet,
he_cap->ppe_thres);
if (band == NL80211_BAND_6GHZ) {
data[idx].he_6ghz_capa.capa =
ath11k_mac_setup_he_6ghz_cap(cap, band_cap);
}
idx++;
}
@ -3643,6 +3782,16 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar,
band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
band->n_iftype_data = count;
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
ar->supports_6ghz) {
count = ath11k_mac_copy_he_cap(ar, cap,
ar->mac.iftype[NL80211_BAND_6GHZ],
NL80211_BAND_6GHZ);
band = &ar->mac.sbands[NL80211_BAND_6GHZ];
band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ];
band->n_iftype_data = count;
}
}
static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
@ -4085,6 +4234,11 @@ ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
ar->supports_6ghz) {
params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
}
}
static u32
@ -5217,7 +5371,7 @@ ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
rate_idx = ffs(mask->control[band].legacy) - 1;
if (band == NL80211_BAND_5GHZ)
if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
@ -5683,7 +5837,8 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
void *channels;
BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
ARRAY_SIZE(ath11k_5ghz_channels)) !=
ARRAY_SIZE(ath11k_5ghz_channels) +
ARRAY_SIZE(ath11k_6ghz_channels)) !=
ATH11K_NUM_CHANS);
reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
@ -5696,6 +5851,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return -ENOMEM;
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->band = NL80211_BAND_2GHZ;
band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
band->channels = channels;
band->n_bitrates = ath11k_g_rates_size;
@ -5707,23 +5863,48 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
channels = kmemdup(ath11k_5ghz_channels,
sizeof(ath11k_5ghz_channels),
GFP_KERNEL);
if (!channels) {
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
return -ENOMEM;
if (reg_cap->high_5ghz_chan >= ATH11K_MAX_6G_FREQ) {
channels = kmemdup(ath11k_6ghz_channels,
sizeof(ath11k_6ghz_channels), GFP_KERNEL);
if (!channels) {
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
return -ENOMEM;
}
ar->supports_6ghz = true;
band = &ar->mac.sbands[NL80211_BAND_6GHZ];
band->band = NL80211_BAND_6GHZ;
band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels);
band->channels = channels;
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
ath11k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
}
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
band->channels = channels;
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
ath11k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
channels = kmemdup(ath11k_5ghz_channels,
sizeof(ath11k_5ghz_channels),
GFP_KERNEL);
if (!channels) {
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
return -ENOMEM;
}
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->band = NL80211_BAND_5GHZ;
band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
band->channels = channels;
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
ath11k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
}
}
return 0;
@ -5777,6 +5958,7 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
SET_IEEE80211_DEV(ar->hw, NULL);
}

View File

@ -161,6 +161,10 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
else
ch->phy_mode = MODE_11A;
if (channel->band == NL80211_BAND_6GHZ &&
cfg80211_channel_is_psc(channel))
ch->psc_channel = true;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
i, params->nallchans,

File diff suppressed because it is too large.

View File

@ -0,0 +1,82 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*/
#ifndef ATH11K_SPECTRAL_H
#define ATH11K_SPECTRAL_H
#include "../spectral_common.h"
#include "dbring.h"
/* enum ath11k_spectral_mode:
*
* @SPECTRAL_DISABLED: spectral mode is disabled
* @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
* something else.
* @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
* is performed manually.
*/
enum ath11k_spectral_mode {
ATH11K_SPECTRAL_DISABLED = 0,
ATH11K_SPECTRAL_BACKGROUND,
ATH11K_SPECTRAL_MANUAL,
};
struct ath11k_spectral {
struct ath11k_dbring rx_ring;
/* Protects enabled */
spinlock_t lock;
struct rchan *rfs_scan; /* relay(fs) channel for spectral scan */
struct dentry *scan_ctl;
struct dentry *scan_count;
struct dentry *scan_bins;
enum ath11k_spectral_mode mode;
u16 count;
u8 fft_size;
bool enabled;
};
#ifdef CONFIG_ATH11K_SPECTRAL
int ath11k_spectral_init(struct ath11k_base *ab);
void ath11k_spectral_deinit(struct ath11k_base *ab);
int ath11k_spectral_vif_stop(struct ath11k_vif *arvif);
void ath11k_spectral_reset_buffer(struct ath11k *ar);
enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar);
struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar);
#else
static inline int ath11k_spectral_init(struct ath11k_base *ab)
{
return 0;
}
static inline void ath11k_spectral_deinit(struct ath11k_base *ab)
{
}
static inline int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
{
return 0;
}
static inline void ath11k_spectral_reset_buffer(struct ath11k *ar)
{
}
static inline
enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
{
return ATH11K_SPECTRAL_DISABLED;
}
static inline
struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
{
return NULL;
}
#endif /* CONFIG_ATH11K_SPECTRAL */
#endif /* ATH11K_SPECTRAL_H */

View File

@ -27,6 +27,11 @@ struct wmi_tlv_svc_ready_parse {
bool wmi_svc_bitmap_done;
};
struct wmi_tlv_dma_ring_caps_parse {
struct wmi_dma_ring_capabilities *dma_ring_caps;
u32 n_dma_ring_caps;
};
struct wmi_tlv_svc_rdy_ext_parse {
struct ath11k_service_ext_param param;
struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
@ -39,15 +44,35 @@ struct wmi_tlv_svc_rdy_ext_parse {
struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
u32 n_ext_hal_reg_caps;
struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
bool hw_mode_done;
bool mac_phy_done;
bool ext_hal_reg_done;
bool mac_phy_chainmask_combo_done;
bool mac_phy_chainmask_cap_done;
bool oem_dma_ring_cap_done;
bool dma_ring_cap_done;
};
struct wmi_tlv_svc_rdy_ext2_parse {
struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
bool dma_ring_cap_done;
};
struct wmi_tlv_rdy_parse {
u32 num_extra_mac_addr;
};
struct wmi_tlv_dma_buf_release_parse {
struct ath11k_wmi_dma_buf_release_fixed_param fixed;
struct wmi_dma_buf_release_entry *buf_entry;
struct wmi_dma_buf_release_meta_data *meta_data;
u32 num_buf_entry;
u32 num_meta;
bool buf_entry_done;
bool meta_data_done;
};
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TAG_ARRAY_BYTE]
= { .min_len = 0 },
@ -368,6 +393,17 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
sizeof(struct ath11k_ppe_threshold));
cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
sizeof(struct ath11k_ppe_threshold));
return 0;
}
@ -1692,10 +1728,10 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
*/
if (param->auth_flag)
cmd->peer_flags |= WMI_PEER_AUTH;
if (param->need_ptk_4_way)
if (param->need_ptk_4_way) {
cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
else
cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY;
cmd->peer_flags &= ~WMI_PEER_AUTH;
}
if (param->need_gtk_2_way)
cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
/* safe mode bypass the 4-way handshake */
@ -1778,6 +1814,7 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
cmd->peer_he_ops = param->peer_he_ops;
memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
sizeof(param->peer_he_cap_phyinfo));
@ -1831,6 +1868,7 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
/* HE Rates */
cmd->peer_he_mcs = param->peer_he_mcs_count;
cmd->min_data_rate = param->min_data_rate;
ptr += sizeof(*mcs);
@ -1886,6 +1924,8 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
arg->dwell_time_active_6g = 40;
arg->dwell_time_passive_6g = 30;
arg->min_rest_time = 50;
arg->max_rest_time = 500;
arg->repeat_probe_time = 0;
@ -1990,6 +2030,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
int i, ret, len;
u32 *tmp_ptr;
u8 extraie_len_with_pad = 0;
struct hint_short_ssid *s_ssid = NULL;
struct hint_bssid *hint_bssid = NULL;
len = sizeof(*cmd);
@ -2011,6 +2053,14 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
roundup(params->extraie.len, sizeof(u32));
len += extraie_len_with_pad;
if (params->num_hint_bssid)
len += TLV_HDR_SIZE +
params->num_hint_bssid * sizeof(struct hint_bssid);
if (params->num_hint_s_ssid)
len += TLV_HDR_SIZE +
params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@ -2032,6 +2082,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
cmd->dwell_time_active = params->dwell_time_active;
cmd->dwell_time_active_2g = params->dwell_time_active_2g;
cmd->dwell_time_passive = params->dwell_time_passive;
cmd->dwell_time_active_6g = params->dwell_time_active_6g;
cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
cmd->min_rest_time = params->min_rest_time;
cmd->max_rest_time = params->max_rest_time;
cmd->repeat_probe_time = params->repeat_probe_time;
@ -2109,6 +2161,68 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
ptr += extraie_len_with_pad;
if (params->num_hint_s_ssid) {
len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
s_ssid = ptr;
for (i = 0; i < params->num_hint_s_ssid; ++i) {
s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
s_ssid++;
}
ptr += len;
}
if (params->num_hint_bssid) {
len = params->num_hint_bssid * sizeof(struct hint_bssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
hint_bssid = ptr;
for (i = 0; i < params->num_hint_bssid; ++i) {
hint_bssid->freq_flags =
params->hint_bssid[i].freq_flags;
ether_addr_copy(&params->hint_bssid[i].bssid.addr[0],
&hint_bssid->bssid.addr[0]);
hint_bssid++;
}
}
len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
if (params->num_hint_s_ssid) {
s_ssid = ptr;
for (i = 0; i < params->num_hint_s_ssid; ++i) {
s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
s_ssid++;
}
}
ptr += len;
len = params->num_hint_bssid * sizeof(struct hint_bssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
if (params->num_hint_bssid) {
hint_bssid = ptr;
for (i = 0; i < params->num_hint_bssid; ++i) {
hint_bssid->freq_flags =
params->hint_bssid[i].freq_flags;
ether_addr_copy(&params->hint_bssid[i].bssid.addr[0],
&hint_bssid->bssid.addr[0]);
hint_bssid++;
}
}
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_START_SCAN_CMDID);
if (ret) {
@ -2178,91 +2292,110 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
struct wmi_tlv *tlv;
void *ptr;
int i, ret, len;
u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
u32 *reg1, *reg2;
len = sizeof(*cmd) + TLV_HDR_SIZE +
sizeof(*chan_info) * chan_list->nallchans;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI no.of chan = %d len = %d\n", chan_list->nallchans, len);
cmd->pdev_id = chan_list->pdev_id;
cmd->num_scan_chans = chan_list->nallchans;
ptr = skb->data + sizeof(*cmd);
len = sizeof(*chan_info) * chan_list->nallchans;
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
ptr += TLV_HDR_SIZE;
tchan_info = &chan_list->ch_param[0];
while (chan_list->nallchans) {
len = sizeof(*cmd) + TLV_HDR_SIZE;
max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
sizeof(*chan_info);
for (i = 0; i < chan_list->nallchans; ++i) {
chan_info = ptr;
memset(chan_info, 0, sizeof(*chan_info));
len = sizeof(*chan_info);
chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_CHANNEL) |
FIELD_PREP(WMI_TLV_LEN,
len - TLV_HDR_SIZE);
if (chan_list->nallchans > max_chan_limit)
num_send_chans = max_chan_limit;
else
num_send_chans = chan_list->nallchans;
reg1 = &chan_info->reg_info_1;
reg2 = &chan_info->reg_info_2;
chan_info->mhz = tchan_info->mhz;
chan_info->band_center_freq1 = tchan_info->cfreq1;
chan_info->band_center_freq2 = tchan_info->cfreq2;
chan_list->nallchans -= num_send_chans;
len += sizeof(*chan_info) * num_send_chans;
if (tchan_info->is_chan_passive)
chan_info->info |= WMI_CHAN_INFO_PASSIVE;
if (tchan_info->allow_he)
chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
else if (tchan_info->allow_vht)
chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
else if (tchan_info->allow_ht)
chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
if (tchan_info->half_rate)
chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
if (tchan_info->quarter_rate)
chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
tchan_info->phy_mode);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
tchan_info->minpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
tchan_info->maxpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
tchan_info->maxregpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
tchan_info->reg_class_id);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
tchan_info->antennamax);
cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = chan_list->pdev_id;
cmd->num_scan_chans = num_send_chans;
if (num_sends)
cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI chan scan list chan[%d] = %u\n",
i, chan_info->mhz);
"WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
num_send_chans, len, cmd->pdev_id, num_sends);
ptr += sizeof(*chan_info);
ptr = skb->data + sizeof(*cmd);
tchan_info++;
len = sizeof(*chan_info) * num_send_chans;
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
ptr += TLV_HDR_SIZE;
for (i = 0; i < num_send_chans; ++i) {
chan_info = ptr;
memset(chan_info, 0, sizeof(*chan_info));
len = sizeof(*chan_info);
chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_CHANNEL) |
FIELD_PREP(WMI_TLV_LEN,
len - TLV_HDR_SIZE);
reg1 = &chan_info->reg_info_1;
reg2 = &chan_info->reg_info_2;
chan_info->mhz = tchan_info->mhz;
chan_info->band_center_freq1 = tchan_info->cfreq1;
chan_info->band_center_freq2 = tchan_info->cfreq2;
if (tchan_info->is_chan_passive)
chan_info->info |= WMI_CHAN_INFO_PASSIVE;
if (tchan_info->allow_he)
chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
else if (tchan_info->allow_vht)
chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
else if (tchan_info->allow_ht)
chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
if (tchan_info->half_rate)
chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
if (tchan_info->quarter_rate)
chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
if (tchan_info->psc_channel)
chan_info->info |= WMI_CHAN_INFO_PSC;
chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
tchan_info->phy_mode);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
tchan_info->minpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
tchan_info->maxpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
tchan_info->maxregpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
tchan_info->reg_class_id);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
tchan_info->antennamax);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
i, chan_info->mhz, chan_info->info);
ptr += sizeof(*chan_info);
tchan_info++;
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
dev_kfree_skb(skb);
return ret;
}
num_sends++;
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
dev_kfree_skb(skb);
}
return ret;
return 0;
}
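
The rewritten loop above splits the channel list across as many WMI_SCAN_CHAN_LIST messages as the firmware's maximum message length allows, flagging every message after the first as an append. A worked sizing example with invented numbers (an illustration, not driver code):

static u16 example_num_chan_list_msgs(u32 max_msg_len, u16 cmd_sz,
                                      u16 chan_info_sz, u16 nallchans)
{
        /* e.g. (4096 - 16 - 8) / 60 = 67 channels fit per message */
        u16 max_chan_limit = (max_msg_len - cmd_sz - TLV_HDR_SIZE) /
                             chan_info_sz;

        /* 150 channels with a 67-channel limit go out as 67 + 67 + 16,
         * i.e. three messages; every message after the first sets
         * WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so firmware appends to
         * the list it already has instead of replacing it. */
        return DIV_ROUND_UP(nallchans, max_chan_limit);
}
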
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
@ -3265,6 +3398,236 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
}
int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
struct ath11k_wmi_vdev_spectral_conf_param *param)
{
struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
memcpy(&cmd->param, param, sizeof(*param));
ret = ath11k_wmi_cmd_send(ar->wmi, skb,
WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send spectral scan config wmi cmd\n");
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI spectral scan config cmd vdev_id 0x%x\n",
param->vdev_id);
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
u32 trigger, u32 enable)
{
struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->trigger_cmd = trigger;
cmd->enable_cmd = enable;
ret = ath11k_wmi_cmd_send(ar->wmi, skb,
WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send spectral enable wmi cmd\n");
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI spectral enable cmd vdev id 0x%x\n",
vdev_id);
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
{
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = param->pdev_id;
cmd->module_id = param->module_id;
cmd->base_paddr_lo = param->base_paddr_lo;
cmd->base_paddr_hi = param->base_paddr_hi;
cmd->head_idx_paddr_lo = param->head_idx_paddr_lo;
cmd->head_idx_paddr_hi = param->head_idx_paddr_hi;
cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo;
cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi;
cmd->num_elems = param->num_elems;
cmd->buf_size = param->buf_size;
cmd->num_resp_per_event = param->num_resp_per_event;
cmd->event_timeout_ms = param->event_timeout_ms;
ret = ath11k_wmi_cmd_send(ar->wmi, skb,
WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send dma ring cfg req wmi cmd\n");
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI DMA ring cfg req cmd pdev_id 0x%x\n",
param->pdev_id);
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_buf_release_parse *parse = data;
if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
return -EPROTO;
if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
return -ENOBUFS;
parse->num_buf_entry++;
return 0;
}
static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_buf_release_parse *parse = data;
if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
return -EPROTO;
if (parse->num_meta >= parse->fixed.num_meta_data_entry)
return -ENOBUFS;
parse->num_meta++;
return 0;
}
static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_buf_release_parse *parse = data;
int ret;
switch (tag) {
case WMI_TAG_DMA_BUF_RELEASE:
memcpy(&parse->fixed, ptr,
sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
break;
case WMI_TAG_ARRAY_STRUCT:
if (!parse->buf_entry_done) {
parse->num_buf_entry = 0;
parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_dma_buf_entry_parse,
parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
ret);
return ret;
}
parse->buf_entry_done = true;
} else if (!parse->meta_data_done) {
parse->num_meta = 0;
parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_dma_buf_meta_parse,
parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
ret);
return ret;
}
parse->meta_data_done = true;
}
break;
default:
break;
}
return 0;
}
static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct wmi_tlv_dma_buf_release_parse parse = { };
struct ath11k_dbring_buf_release_event param;
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_dma_buf_parse,
&parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
return;
}
param.fixed = parse.fixed;
param.buf_entry = parse.buf_entry;
param.num_buf_entry = parse.num_buf_entry;
param.meta_data = parse.meta_data;
param.num_meta = parse.num_meta;
ret = ath11k_dbring_buffer_release_event(ab, &param);
if (ret) {
ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
return;
}
}
static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
@ -3445,6 +3808,95 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
return 0;
}
static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_ring_caps_parse *parse = data;
if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
return -EPROTO;
parse->n_dma_ring_caps++;
return 0;
}
static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
u32 num_cap)
{
size_t sz;
void *ptr;
sz = num_cap * sizeof(struct ath11k_dbring_cap);
ptr = kzalloc(sz, GFP_ATOMIC);
if (!ptr)
return -ENOMEM;
ab->db_caps = ptr;
ab->num_db_cap = num_cap;
return 0;
}
static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
}
static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
u16 len, const void *ptr, void *data)
{
struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
struct wmi_dma_ring_capabilities *dma_caps;
struct ath11k_dbring_cap *dir_buff_caps;
int ret;
u32 i;
dma_caps_parse->n_dma_ring_caps = 0;
dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_dma_ring_caps_parse,
dma_caps_parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
return ret;
}
if (!dma_caps_parse->n_dma_ring_caps)
return 0;
if (ab->num_db_cap) {
ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
return 0;
}
ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
if (ret)
return ret;
dir_buff_caps = ab->db_caps;
for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
ret = -EINVAL;
goto free_dir_buff;
}
dir_buff_caps[i].id = dma_caps[i].module_id;
dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
}
return 0;
free_dir_buff:
ath11k_wmi_free_dbring_caps(ab);
return ret;
}
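
Once ab->db_caps and ab->num_db_cap are populated here, a consumer such as the spectral module can look up its per-pdev capability. A sketch of such a lookup (the helper itself is hypothetical; the fields used are the ones copied above):

/* Illustrative lookup, not the in-tree dbring code. */
static struct ath11k_dbring_cap *
example_find_dbring_cap(struct ath11k_base *ab, u32 pdev_id,
			enum wmi_direct_buffer_module id)
{
	int i;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (ab->db_caps[i].id == id &&
		    ab->db_caps[i].pdev_id == pdev_id)
			return &ab->db_caps[i];
	}

	return NULL;
}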
static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
@ -3501,7 +3953,19 @@ static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
return ret;
svc_rdy_ext->ext_hal_reg_done = true;
complete(&ab->wmi_ab.service_ready);
} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
svc_rdy_ext->mac_phy_chainmask_combo_done = true;
} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
svc_rdy_ext->mac_phy_chainmask_cap_done = true;
} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
svc_rdy_ext->oem_dma_ring_cap_done = true;
} else if (!svc_rdy_ext->dma_ring_cap_done) {
ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
&svc_rdy_ext->dma_caps_parse);
if (ret)
return ret;
svc_rdy_ext->dma_ring_cap_done = true;
}
break;
@ -3522,11 +3986,66 @@ static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
&svc_rdy_ext);
if (ret) {
ath11k_warn(ab, "failed to parse tlv %d\n", ret);
return ret;
goto err;
}
if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
complete(&ab->wmi_ab.service_ready);
kfree(svc_rdy_ext.mac_phy_caps);
return 0;
err:
ath11k_wmi_free_dbring_caps(ab);
return ret;
}
static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
int ret;
switch (tag) {
case WMI_TAG_ARRAY_STRUCT:
if (!parse->dma_ring_cap_done) {
ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
&parse->dma_caps_parse);
if (ret)
return ret;
parse->dma_ring_cap_done = true;
}
break;
default:
break;
}
return 0;
}
static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_svc_rdy_ext2_parse,
&svc_rdy_ext2);
if (ret) {
ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
goto err;
}
complete(&ab->wmi_ab.service_ready);
return 0;
err:
ath11k_wmi_free_dbring_caps(ab);
return ret;
}
static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
@ -3822,6 +4341,7 @@ static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
}
hdr->pdev_id = ev->pdev_id;
hdr->chan_freq = ev->chan_freq;
hdr->channel = ev->channel;
hdr->snr = ev->snr;
hdr->rate = ev->rate;
@ -5193,7 +5713,9 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
status->band = NL80211_BAND_5GHZ;
@ -5206,9 +5728,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
goto exit;
}
if (rx_ev.phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
if (rx_ev.phy_mode == MODE_11B &&
(status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
ath11k_dbg(ab, ATH11K_DBG_WMI,
"wmi mgmt rx 11b (CCK) on 5GHz\n");
"wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
sband = &ar->mac.sbands[status->band];
@ -5933,6 +6456,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_SERVICE_READY_EXT_EVENTID:
ath11k_service_ready_ext_event(ab, skb);
break;
case WMI_SERVICE_READY_EXT2_EVENTID:
ath11k_service_ready_ext2_event(ab, skb);
break;
case WMI_REG_CHAN_LIST_CC_EVENTID:
ath11k_reg_chan_list_event(ab, skb);
break;
@ -5994,12 +6520,16 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_PDEV_TEMPERATURE_EVENTID:
ath11k_wmi_pdev_temperature_event(ab, skb);
break;
case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_VDEV_DELETE_RESP_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
break;
@ -6213,4 +6743,6 @@ void ath11k_wmi_detach(struct ath11k_base *ab)
for (i = 0; i < ab->htc.wmi_ep_count; i++)
ath11k_wmi_pdev_detach(ab, i);
ath11k_wmi_free_dbring_caps(ab);
}

View File

@ -24,6 +24,8 @@ struct ath11k_fw_stats;
#define HE_PET_8_USEC 1
#define HE_PET_16_USEC 2
#define WMI_MAX_CHAINS 8
#define WMI_MAX_NUM_SS MAX_HE_NSS
#define WMI_MAX_NUM_RU MAX_HE_RU
@ -50,10 +52,20 @@ struct wmi_tlv {
#define WMI_MAX_MEM_REQS 32
#define ATH11K_MAX_HW_LISTEN_INTERVAL 5
#define WLAN_SCAN_MAX_HINT_S_SSID 10
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
/*
* HW mode config type replicated from FW header
@ -586,6 +598,11 @@ enum wmi_tlv_event_id {
WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
WMI_PDEV_RAP_INFO_EVENTID,
WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
WMI_SERVICE_READY_EXT2_EVENTID,
WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_STOPPED_EVENTID,
WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@ -1011,6 +1028,7 @@ enum wmi_tlv_vdev_param {
WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
WMI_VDEV_PARAM_BA_MODE = 0x7e,
WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
WMI_VDEV_PARAM_BSS_COLOR,
WMI_VDEV_PARAM_SET_HEMU_MODE,
@ -2013,9 +2031,10 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
WMI_MAX_EXT_SERVICE
};
enum {
@ -2076,6 +2095,14 @@ enum wmi_beacon_gen_mode {
WMI_BEACON_BURST_MODE = 1
};
enum wmi_direct_buffer_module {
WMI_DIRECT_BUF_SPECTRAL = 0,
WMI_DIRECT_BUF_CFR = 1,
/* keep it last */
WMI_DIRECT_BUF_MAX
};
struct wmi_host_pdev_band_to_mac {
u32 pdev_id;
u32 start_freq;
@ -2382,6 +2409,15 @@ struct wmi_mac_addr {
} __packed;
} __packed;
struct wmi_dma_ring_capabilities {
u32 tlv_header;
u32 pdev_id;
u32 module_id;
u32 min_elem;
u32 min_buf_sz;
u32 min_buf_align;
} __packed;
struct wmi_ready_event_min {
struct wmi_abi_version fw_abi_vers;
struct wmi_mac_addr mac_addr;
@ -2519,7 +2555,8 @@ struct channel_param {
allow_ht:1,
allow_vht:1,
allow_he:1,
set_agile:1;
set_agile:1,
psc_channel:1;
u32 phy_mode;
u32 cfreq1;
u32 cfreq2;
@ -3059,6 +3096,9 @@ struct wmi_start_scan_cmd {
u32 num_vendor_oui;
u32 scan_ctrl_flags_ext;
u32 dwell_time_active_2g;
u32 dwell_time_active_6g;
u32 dwell_time_passive_6g;
u32 scan_start_offset;
} __packed;
#define WMI_SCAN_FLAG_PASSIVE 0x1
@ -3098,6 +3138,16 @@ enum {
((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
WMI_SCAN_DWELL_MODE_MASK))
struct hint_short_ssid {
u32 freq_flags;
u32 short_ssid;
};
struct hint_bssid {
u32 freq_flags;
struct wmi_mac_addr bssid;
};
struct scan_req_params {
u32 scan_id;
u32 scan_req_id;
@ -3125,6 +3175,8 @@ struct scan_req_params {
u32 dwell_time_active;
u32 dwell_time_active_2g;
u32 dwell_time_passive;
u32 dwell_time_active_6g;
u32 dwell_time_passive_6g;
u32 min_rest_time;
u32 max_rest_time;
u32 repeat_probe_time;
@ -3175,6 +3227,10 @@ struct scan_req_params {
struct element_info extraie;
struct element_info htcap;
struct element_info vhtcap;
u32 num_hint_s_ssid;
u32 num_hint_bssid;
struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
};
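
The hint arrays and the 6 GHz dwell times above are filled by the scan request path; as a hedged sketch, adding one short-SSID hint might look like this (the helper name is an assumption and the exact freq_flags bit layout is firmware-defined):

/* Illustrative only; not the in-tree 6 GHz scan setup. */
static void example_add_s_ssid_hint(struct scan_req_params *arg,
				    u32 freq_flags, u32 short_ssid)
{
	if (arg->num_hint_s_ssid >= WLAN_SCAN_MAX_HINT_S_SSID)
		return;

	arg->hint_s_ssid[arg->num_hint_s_ssid].freq_flags = freq_flags;
	arg->hint_s_ssid[arg->num_hint_s_ssid].short_ssid = short_ssid;
	arg->num_hint_s_ssid++;
}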
struct wmi_ssid_arg {
@ -3264,6 +3320,7 @@ struct wmi_bcn_send_from_host_cmd {
#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
#define WMI_CHAN_INFO_PSC BIT(18)
#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
@ -3444,6 +3501,7 @@ struct peer_assoc_params {
u32 tx_max_rate;
u32 tx_mcs_set;
u8 vht_capable;
u8 min_data_rate;
u32 tx_max_mcs_nss;
u32 peer_bw_rxnss_override;
bool is_pmf_enabled;
@ -3472,6 +3530,7 @@ struct peer_assoc_params {
bool he_flag;
u32 peer_he_cap_macinfo[2];
u32 peer_he_cap_macinfo_internal;
u32 peer_he_caps_6ghz;
u32 peer_he_ops;
u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
u32 peer_he_mcs_count;
@ -3509,6 +3568,8 @@ struct wmi_peer_assoc_complete_cmd {
u32 peer_he_mcs;
u32 peer_he_cap_info_ext;
u32 peer_he_cap_info_internal;
u32 min_data_rate;
u32 peer_he_caps_6ghz;
} __packed;
struct wmi_stop_scan_cmd {
@ -4228,6 +4289,7 @@ struct wmi_pdev_temperature_event {
#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
struct mgmt_rx_event_params {
u32 chan_freq;
u32 channel;
u32 snr;
u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
@ -4257,6 +4319,7 @@ struct wmi_mgmt_rx_hdr {
u32 rx_tsf_l32;
u32 rx_tsf_u32;
u32 pdev_id;
u32 chan_freq;
} __packed;
#define MAX_ANTENNA_EIGHT 8
@ -4734,6 +4797,117 @@ struct ath11k_wmi_pdev_lro_config_cmd {
u32 pdev_id;
} __packed;
#define ATH11K_WMI_SPECTRAL_COUNT_DEFAULT 0
#define ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT 224
#define ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT 1
#define ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
#define ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT 1
#define ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
#define ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
#define ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
#define ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
#define ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
#define ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
#define ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
#define ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
#define ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
#define ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT 2
#define ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
#define ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
#define ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT 1
struct ath11k_wmi_vdev_spectral_conf_param {
u32 vdev_id;
u32 scan_count;
u32 scan_period;
u32 scan_priority;
u32 scan_fft_size;
u32 scan_gc_ena;
u32 scan_restart_ena;
u32 scan_noise_floor_ref;
u32 scan_init_delay;
u32 scan_nb_tone_thr;
u32 scan_str_bin_thr;
u32 scan_wb_rpt_mode;
u32 scan_rssi_rpt_mode;
u32 scan_rssi_thr;
u32 scan_pwr_format;
u32 scan_rpt_mode;
u32 scan_bin_scale;
u32 scan_dbm_adj;
u32 scan_chn_mask;
} __packed;
struct ath11k_wmi_vdev_spectral_conf_cmd {
u32 tlv_header;
struct ath11k_wmi_vdev_spectral_conf_param param;
} __packed;
#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
struct ath11k_wmi_vdev_spectral_enable_cmd {
u32 tlv_header;
u32 vdev_id;
u32 trigger_cmd;
u32 enable_cmd;
} __packed;
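
Taken together, a hedged sketch of programming one vdev with the default parameters defined above and then triggering a scan through the two WMI helpers declared later in this header (the wrapper function itself is hypothetical):

/* Illustration only; the real flow lives in the spectral module. */
static int example_spectral_start(struct ath11k *ar, u32 vdev_id)
{
	struct ath11k_wmi_vdev_spectral_conf_param param = { };
	int ret;

	param.vdev_id		   = vdev_id;
	param.scan_count	   = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
	param.scan_period	   = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT;
	param.scan_priority	   = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT;
	param.scan_fft_size	   = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT;
	param.scan_gc_ena	   = ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT;
	param.scan_restart_ena	   = ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT;
	param.scan_noise_floor_ref = ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
	param.scan_init_delay	   = ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT;
	param.scan_nb_tone_thr	   = ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
	param.scan_str_bin_thr	   = ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
	param.scan_wb_rpt_mode	   = ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
	param.scan_rssi_rpt_mode   = ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
	param.scan_rssi_thr	   = ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT;
	param.scan_pwr_format	   = ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
	param.scan_rpt_mode	   = ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT;
	param.scan_bin_scale	   = ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT;
	param.scan_dbm_adj	   = ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT;
	param.scan_chn_mask	   = ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT;

	ret = ath11k_wmi_vdev_spectral_conf(ar, &param);
	if (ret)
		return ret;

	return ath11k_wmi_vdev_spectral_enable(ar, vdev_id,
					       ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
					       ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
}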
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd {
u32 tlv_header;
u32 pdev_id;
u32 module_id; /* see enum wmi_direct_buffer_module */
u32 base_paddr_lo;
u32 base_paddr_hi;
u32 head_idx_paddr_lo;
u32 head_idx_paddr_hi;
u32 tail_idx_paddr_lo;
u32 tail_idx_paddr_hi;
u32 num_elems; /* Number of elems in the ring */
u32 buf_size; /* size of allocated buffer in bytes */
/* Number of wmi_dma_buf_release_entry packed together */
u32 num_resp_per_event;
/* Target should timeout and send whatever resp
* it has if this time expires, units in milliseconds
*/
u32 event_timeout_ms;
} __packed;
struct ath11k_wmi_dma_buf_release_fixed_param {
u32 pdev_id;
u32 module_id;
u32 num_buf_release_entry;
u32 num_meta_data_entry;
} __packed;
struct wmi_dma_buf_release_entry {
u32 tlv_header;
u32 paddr_lo;
/* Bits 11:0: address of data
* Bits 31:12: host context data
*/
u32 paddr_hi;
} __packed;
#define WMI_SPECTRAL_META_INFO1_FREQ1 GENMASK(15, 0)
#define WMI_SPECTRAL_META_INFO1_FREQ2 GENMASK(31, 16)
#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH GENMASK(7, 0)
struct wmi_dma_buf_release_meta_data {
u32 tlv_header;
s32 noise_floor[WMI_MAX_CHAINS];
u32 reset_delay;
u32 freq1;
u32 freq2;
u32 ch_width;
} __packed;
struct target_resource_config {
u32 num_vdevs;
u32 num_peers;
@ -4941,4 +5115,10 @@ int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
bool enable);
int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id);
int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param);
int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
u32 trigger, u32 enable);
int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
struct ath11k_wmi_vdev_spectral_conf_param *param);
#endif

View File

@ -34,7 +34,7 @@ config ATH9K
APs that come with these cards refer to ath9k wiki
products page:
http://wireless.kernel.org/en/users/Drivers/ath9k/products
https://wireless.wiki.kernel.org/en/users/Drivers/ath9k/products
If you choose to build a module, it'll be called ath9k.
@ -185,7 +185,8 @@ config ATH9K_HTC
Support for Atheros HTC based cards.
Chipsets supported: AR9271
For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
For more information:
https://wireless.wiki.kernel.org/en/users/Drivers/ath9k_htc
The built module will be ath9k_htc.

View File

@ -2410,7 +2410,7 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
* of tests. The testing requirements are going to be documented. Desired
* test requirements are documented at:
*
* http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
* https://wireless.wiki.kernel.org/en/users/Drivers/ath9k/dfs
*
* Once a new chipset gets properly tested an individual commit can be used
* to document the testing for DFS for that chipset.

View File

@ -10,7 +10,7 @@ config CARL9170
It needs a special firmware (carl9170-1.fw), which can be downloaded
from our wiki here:
<http://wireless.kernel.org/en/users/Drivers/carl9170>
<https://wireless.wiki.kernel.org/en/users/Drivers/carl9170>
If you choose to build a module, it'll be called carl9170.

View File

@ -61,7 +61,7 @@ MODULE_ALIAS("arusb_lnx");
* Note:
*
* Always update our wiki's device list (located at:
* http://wireless.kernel.org/en/users/Drivers/ar9170/devices ),
* https://wireless.wiki.kernel.org/en/users/Drivers/ar9170/devices ),
* whenever you add a new device.
*/
static const struct usb_device_id carl9170_usb_ids[] = {

View File

@ -24,6 +24,7 @@
* could be acquired so far.
*/
#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
#define SPECTRAL_ATH11K_MAX_NUM_BINS 512
/* FFT sample format given to userspace via debugfs.
*
@ -37,6 +38,7 @@ enum ath_fft_sample_type {
ATH_FFT_SAMPLE_HT20 = 1,
ATH_FFT_SAMPLE_HT20_40,
ATH_FFT_SAMPLE_ATH10K,
ATH_FFT_SAMPLE_ATH11K
};
struct fft_sample_tlv {
@ -110,4 +112,19 @@ struct fft_sample_ath10k {
u8 data[0];
} __packed;
struct fft_sample_ath11k {
struct fft_sample_tlv tlv;
u8 chan_width_mhz;
s8 max_index;
u8 max_exp;
__be16 freq1;
__be16 freq2;
__be16 max_magnitude;
__be16 rssi;
__be32 tsf;
__be32 noise;
u8 data[0];
} __packed;
#endif /* SPECTRAL_COMMON_H */
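
For context, a consumer of the relayed spectral stream walks these samples back to back, each led by its fft_sample_tlv header. A minimal sketch of that walk, written with kernel helpers for consistency with the surrounding code (the function is not part of this header and its name is an assumption):

/* Illustrative TLV walk over a buffer of FFT samples. */
static void example_parse_ath11k_samples(const u8 *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct fft_sample_tlv) <= len) {
		const struct fft_sample_tlv *tlv = (const void *)(buf + off);
		size_t sample_len = sizeof(*tlv) + be16_to_cpu(tlv->length);

		if (off + sample_len > len)
			break;

		if (tlv->type == ATH_FFT_SAMPLE_ATH11K) {
			const struct fft_sample_ath11k *fft = (const void *)tlv;

			/* bins follow the fixed part of the sample */
			pr_info("freq1 %u MHz, %u FFT bins\n",
				be16_to_cpu(fft->freq1),
				(unsigned int)(sample_len - sizeof(*fft)));
		}

		off += sample_len;
	}
}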

View File

@ -10,7 +10,7 @@ config WIL6210
wil6210 chip by Wilocity. It supports operation on the
60 GHz band, covered by the IEEE802.11ad standard.
http://wireless.kernel.org/en/users/Drivers/wil6210
https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
If you choose to build it as a module, it will be called
wil6210