Merge tag 'wireless-drivers-next-for-davem-2019-02-06' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 5.1

First set of patches for 5.1. Lots of new features in various drivers
but nothing really special standing out.

Major changes:

brcmfmac

* DMI nvram filename quirk for PoV TAB-P1006W-232 tablet

rsi

* support for hardware scan offload

iwlwifi

* support for Target Wake Time (TWT) -- a feature that allows the AP
  to specify when individual stations can access the medium

* support for mac80211 AMSDU handling

* some new PCI IDs

* relicense the pcie submodule to dual GPL/BSD

* reworked the TOF/CSI (channel estimation matrix) implementation

* some product name updates in the human-readable strings

mt76

* energy detect regulatory compliance fixes

* preparation for MT7603 support

* channel switch announcement support

mwifiex

* support for sd8977 chipset

qtnfmac

* support for 4addr mode

* convert to SPDX license identifiers
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit 5661f29ade by David S. Miller, 2019-02-06 09:36:36 -08:00
183 changed files with 4087 additions and 3931 deletions

View File

@ -10,13 +10,13 @@
#include <linux/delay.h>
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
dev_err((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
dev_warn((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_info(bus, fmt, ...) \
pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
dev_info((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_debug(bus, fmt, ...) \
pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
dev_dbg((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
struct bcma_bus;
@ -33,7 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus);
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
/* scan.c */
void bcma_detect_chip(struct bcma_bus *bus);
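
With the host device now stored in bus->dev, these log macros pick up the standard driver-core prefix from dev_err()/dev_warn()/dev_info()/dev_dbg() instead of printing a bare "bus%d:" line. A rough sketch of the visible effect at a hypothetical call site (the exact prefix depends on the host driver and device name):

/* old: "bus0: failed to read SPROM: -5"
 * new: "bcma-pci-bridge 0000:01:00.0: bus0: failed to read SPROM: -5" */
bcma_err(bus, "failed to read SPROM: %d\n", err);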

View File

@ -183,7 +183,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->owner = THIS_MODULE;
chip->parent = bcma_bus_get_host_dev(bus);
chip->parent = bus->dev;
#if IS_BUILTIN(CONFIG_OF)
chip->of_node = cc->core->dev.of_node;
#endif

View File

@ -196,6 +196,8 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
goto err_pci_release_regions;
}
bus->dev = &dev->dev;
/* Map MMIO */
err = -ENOMEM;
bus->mmio = pci_iomap(dev, 0, ~0UL);

View File

@ -179,7 +179,6 @@ int __init bcma_host_soc_register(struct bcma_soc *soc)
/* Host specific */
bus->hosttype = BCMA_HOSTTYPE_SOC;
bus->ops = &bcma_host_soc_ops;
bus->host_pdev = NULL;
/* Initialize struct, detect chip */
bcma_init_bus(bus);
@ -213,6 +212,8 @@ static int bcma_host_soc_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
bus->dev = dev;
/* Map MMIO */
bus->mmio = of_iomap(np, 0);
if (!bus->mmio)
@ -221,7 +222,6 @@ static int bcma_host_soc_probe(struct platform_device *pdev)
/* Host specific */
bus->hosttype = BCMA_HOSTTYPE_SOC;
bus->ops = &bcma_host_soc_ops;
bus->host_pdev = pdev;
/* Initialize struct, detect chip */
bcma_init_bus(bus);

View File

@ -223,8 +223,8 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num)
mips_irq = bcma_core_mips_irq(core);
return mips_irq <= 4 ? mips_irq + 2 : 0;
}
if (bus->host_pdev)
return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
if (bus->dev)
return bcma_of_get_irq(bus->dev, core, num);
return 0;
case BCMA_HOSTTYPE_SDIO:
return 0;
@ -239,18 +239,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
core->dev.parent = bcma_bus_get_host_dev(bus);
if (core->dev.parent)
bcma_of_fill_device(core->dev.parent, core);
core->dev.parent = bus->dev;
if (bus->dev)
bcma_of_fill_device(bus->dev, core);
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
core->dma_dev = &bus->host_pci->dev;
core->dma_dev = bus->dev;
core->irq = bus->host_pci->irq;
break;
case BCMA_HOSTTYPE_SOC:
if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
core->dma_dev = &bus->host_pdev->dev;
if (IS_ENABLED(CONFIG_OF) && bus->dev) {
core->dma_dev = bus->dev;
} else {
core->dev.dma_mask = &core->dev.coherent_dma_mask;
core->dma_dev = &core->dev;
@ -261,28 +261,6 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
}
}
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
if (bus->host_pci)
return &bus->host_pci->dev;
else
return NULL;
case BCMA_HOSTTYPE_SOC:
if (bus->host_pdev)
return &bus->host_pdev->dev;
else
return NULL;
case BCMA_HOSTTYPE_SDIO:
if (bus->host_sdio)
return &bus->host_sdio->dev;
else
return NULL;
}
return NULL;
}
void bcma_init_bus(struct bcma_bus *bus)
{
mutex_lock(&bcma_buses_mutex);
@ -402,7 +380,6 @@ int bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
struct device *dev;
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
@ -425,10 +402,8 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
dev = bcma_bus_get_host_dev(bus);
if (dev) {
of_platform_default_populate(dev->of_node, NULL, dev);
}
if (bus->dev)
of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);
/* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) {

View File

@ -1986,7 +1986,7 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
/* no default handler to allow compiler to check that the
* enum is fully handled
*/
};
}
return "<unknown>";
}

View File

@ -1140,7 +1140,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
len -= ie_len;
data += ie_len;
};
}
ret = 0;
out:

View File

@ -668,14 +668,12 @@ static void b43_remove_dynamic_debug(struct b43_wldev *dev)
static void b43_add_dynamic_debug(struct b43_wldev *dev)
{
struct b43_dfsentry *e = dev->dfsentry;
struct dentry *d;
#define add_dyn_dbg(name, id, initstate) do { \
e->dyn_debug[id] = (initstate); \
d = debugfs_create_bool(name, 0600, e->subdir, \
e->dyn_debug_dentries[id] = \
debugfs_create_bool(name, 0600, e->subdir, \
&(e->dyn_debug[id])); \
if (!IS_ERR(d)) \
e->dyn_debug_dentries[id] = d; \
} while (0)
add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false);
@ -718,19 +716,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy));
e->subdir = debugfs_create_dir(devdir, rootdir);
if (!e->subdir || IS_ERR(e->subdir)) {
if (e->subdir == ERR_PTR(-ENODEV)) {
b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not "
"enabled in kernel config\n");
} else {
b43err(dev->wl, "debugfs: cannot create %s directory\n",
devdir);
}
dev->dfsentry = NULL;
kfree(log->log);
kfree(e);
return;
}
e->mmio16read_next = 0xFFFF; /* invalid address */
e->mmio32read_next = 0xFFFF; /* invalid address */
@ -741,13 +726,10 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
struct dentry *d; \
d = debugfs_create_file(__stringify(name), \
e->file_##name.dentry = \
debugfs_create_file(__stringify(name), \
mode, e->subdir, dev, \
&fops_##name.fops); \
e->file_##name.dentry = NULL; \
if (!IS_ERR(d)) \
e->file_##name.dentry = d; \
} while (0)
@ -818,8 +800,6 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
void b43_debugfs_init(void)
{
rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (IS_ERR(rootdir))
rootdir = NULL;
}
void b43_debugfs_exit(void)
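
The error handling deleted here follows the current debugfs convention: the creation helpers cope with failure internally (and accept an ERR_PTR parent), so callers are not expected to check the result. A minimal sketch of the idiom, with made-up names:

#include <linux/debugfs.h>

static struct dentry *example_root;
static bool example_verbose;

static void example_debugfs_init(void)
{
	/* no NULL/IS_ERR checks -- a missing or failed debugfs is harmless */
	example_root = debugfs_create_dir("example", NULL);
	debugfs_create_bool("verbose", 0600, example_root, &example_verbose);
}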

View File

@ -361,14 +361,12 @@ static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev)
static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev)
{
struct b43legacy_dfsentry *e = dev->dfsentry;
struct dentry *d;
#define add_dyn_dbg(name, id, initstate) do { \
e->dyn_debug[id] = (initstate); \
d = debugfs_create_bool(name, 0600, e->subdir, \
e->dyn_debug_dentries[id] = \
debugfs_create_bool(name, 0600, e->subdir, \
&(e->dyn_debug[id])); \
if (!IS_ERR(d)) \
e->dyn_debug_dentries[id] = d; \
} while (0)
add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false);
@ -408,29 +406,14 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev)
snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy));
e->subdir = debugfs_create_dir(devdir, rootdir);
if (!e->subdir || IS_ERR(e->subdir)) {
if (e->subdir == ERR_PTR(-ENODEV)) {
b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not "
"enabled in kernel config\n");
} else {
b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n",
devdir);
}
dev->dfsentry = NULL;
kfree(log->log);
kfree(e);
return;
}
#define ADD_FILE(name, mode) \
do { \
struct dentry *d; \
d = debugfs_create_file(__stringify(name), \
e->file_##name.dentry = \
debugfs_create_file(__stringify(name), \
mode, e->subdir, dev, \
&fops_##name.fops); \
e->file_##name.dentry = NULL; \
if (!IS_ERR(d)) \
e->file_##name.dentry = d; \
} while (0)
@ -492,8 +475,6 @@ void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev,
void b43legacy_debugfs_init(void)
{
rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (IS_ERR(rootdir))
rootdir = NULL;
}
void b43legacy_debugfs_exit(void)

View File

@ -16,8 +16,8 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y += \
-Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \
-Idrivers/net/wireless/broadcom/brcm80211/include
-I $(srctree)/$(src) \
-I $(srctree)/$(src)/../include
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \

View File

@ -315,7 +315,7 @@ static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
/* bail out as things are really fishy here */
WARN(1, "invalid sdio function number: %d\n", func->num);
err = -ENOMEDIUM;
};
}
if (err == -ENOMEDIUM)
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

View File

@ -149,7 +149,7 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
return err;
}
err = request_firmware(&clm, clm_name, bus->dev);
err = firmware_request_nowarn(&clm, clm_name, bus->dev);
if (err) {
brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
err);
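
firmware_request_nowarn() behaves like request_firmware() but suppresses the "firmware not found" log noise, which fits an optional blob such as the CLM data here. A short usage sketch with hypothetical names (dev being the device that owns the firmware):

const struct firmware *fw;
int err;

err = firmware_request_nowarn(&fw, "example/optional-cal.bin", dev);
if (err) {
	dev_info(dev, "optional firmware missing (%d), using defaults\n", err);
} else {
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
}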

View File

@ -43,6 +43,10 @@ static const struct brcmf_dmi_data meegopad_t08_data = {
BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08"
};
static const struct brcmf_dmi_data pov_tab_p1006w_data = {
BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data"
};
static const struct dmi_system_id dmi_platform_data[] = {
{
/* Match for the GPDwin which unfortunately uses somewhat
@ -81,6 +85,17 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&meegopad_t08_data,
},
{
/* Point of View TAB-P1006W-232 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
/* Note 105b is Foxconn's USB/PCI vendor id */
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
},
.driver_data = (void *)&pov_tab_p1006w_data,
},
{}
};
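
For context, a dmi_system_id table like this is normally consumed via dmi_first_match(), with driver_data pointing at the per-board brcmf_dmi_data entry; the actual brcmfmac lookup code is not part of this hunk, so the sketch below is illustrative only:

const struct dmi_system_id *match = dmi_first_match(dmi_platform_data);

if (match) {
	const struct brcmf_dmi_data *data = match->driver_data;
	/* data carries the chip id/revision and the board-specific
	 * suffix used to pick the right nvram file for this tablet */
}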

View File

@ -496,6 +496,11 @@ int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
pi = ifp_to_pno(ifp);
/* No PNO request */
if (!pi->n_reqs)
return 0;
err = brcmf_pno_remove_request(pi, reqid);
if (err)
return err;

View File

@ -1550,6 +1550,10 @@ void brcmf_usb_exit(void)
void brcmf_usb_register(void)
{
int ret;
brcmf_dbg(USB, "Enter\n");
usb_register(&brcmf_usbdrvr);
ret = usb_register(&brcmf_usbdrvr);
if (ret)
brcmf_err("usb_register failed %d\n", ret);
}

View File

@ -16,9 +16,9 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \
-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \
-Idrivers/net/wireless/broadcom/brcm80211/include
-I $(srctree)/$(src) \
-I $(srctree)/$(src)/phy \
-I $(srctree)/$(src)/../include
brcmsmac-y := \
mac80211_if.o \

View File

@ -37,27 +37,18 @@ static struct dentry *root_folder;
void brcms_debugfs_init(void)
{
root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (IS_ERR(root_folder))
root_folder = NULL;
}
void brcms_debugfs_exit(void)
{
if (!root_folder)
return;
debugfs_remove_recursive(root_folder);
root_folder = NULL;
}
int brcms_debugfs_attach(struct brcms_pub *drvr)
void brcms_debugfs_attach(struct brcms_pub *drvr)
{
if (!root_folder)
return -ENODEV;
drvr->dbgfs_dir = debugfs_create_dir(
dev_name(&drvr->wlc->hw->d11core->dev), root_folder);
return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
}
void brcms_debugfs_detach(struct brcms_pub *drvr)
@ -195,7 +186,7 @@ static const struct file_operations brcms_debugfs_def_ops = {
.llseek = seq_lseek
};
static int
static void
brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{
@ -203,27 +194,18 @@ brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
struct dentry *dentry = drvr->dbgfs_dir;
struct brcms_debugfs_entry *entry;
if (IS_ERR_OR_NULL(dentry))
return -ENOENT;
entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
return;
entry->read = read_fn;
entry->drvr = drvr;
dentry = debugfs_create_file(fn, 0444, dentry, entry,
&brcms_debugfs_def_ops);
return PTR_ERR_OR_ZERO(dentry);
debugfs_create_file(fn, 0444, dentry, entry, &brcms_debugfs_def_ops);
}
void brcms_debugfs_create_files(struct brcms_pub *drvr)
{
if (IS_ERR_OR_NULL(drvr->dbgfs_dir))
return;
brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read);
brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read);
}

View File

@ -68,7 +68,7 @@ void __brcms_dbg(struct device *dev, u32 level, const char *func,
struct brcms_pub;
void brcms_debugfs_init(void);
void brcms_debugfs_exit(void);
int brcms_debugfs_attach(struct brcms_pub *drvr);
void brcms_debugfs_attach(struct brcms_pub *drvr);
void brcms_debugfs_detach(struct brcms_pub *drvr);
struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr);
void brcms_debugfs_create_files(struct brcms_pub *drvr);

View File

@ -1199,8 +1199,6 @@ wlc_lcnphy_rx_iq_est(struct brcms_phy *pi,
{
int wait_count = 0;
bool result = true;
u8 phybw40;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5);
@ -3082,7 +3080,7 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
u8 bbmult;
struct phytbl_info tab;
s32 a1, b0, b1;
s32 tssi, pwr, maxtargetpwr, mintargetpwr;
s32 tssi, pwr, mintargetpwr;
bool suspend;
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@ -3119,7 +3117,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
b0 = pi->txpa_2g[0];
b1 = pi->txpa_2g[1];
a1 = pi->txpa_2g[2];
maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
@ -4212,7 +4209,7 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
s8 index;
struct phytbl_info tab;
s32 a1, b0, b1;
s32 tssi, pwr, maxtargetpwr, mintargetpwr;
s32 tssi, pwr, mintargetpwr;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
pi->phy_lastcal = pi->sh->now;
@ -4249,7 +4246,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
b0 = pi->txpa_2g[0];
b1 = pi->txpa_2g[1];
a1 = pi->txpa_2g[2];
maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
@ -4622,13 +4618,10 @@ static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
{
uint idx;
u8 phybw40;
struct phytbl_info tab;
const struct phytbl_info *tb;
u32 val;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++)
wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]);
@ -4831,9 +4824,7 @@ static void wlc_lcnphy_baseband_init(struct brcms_phy *pi)
void wlc_phy_init_lcnphy(struct brcms_phy *pi)
{
u8 phybw40;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
pi_lcn->lcnphy_cal_counter = 0;
pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense;

View File

@ -15,9 +15,7 @@
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
-Idrivers/net/wireless/broadcom/brcm80211/brcmutil \
-Idrivers/net/wireless/broadcom/brcm80211/include
ccflags-y := -I $(srctree)/$(src)/../include
obj-$(CONFIG_BRCMUTIL) += brcmutil.o
brcmutil-objs = utils.o d11.o

View File

@ -3756,10 +3756,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_remove_sysfs;
err = il_dbgfs_register(il, DRV_NAME);
if (err)
IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
err);
il_dbgfs_register(il, DRV_NAME);
/* Start monitoring the killswitch */
queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);

View File

@ -4988,10 +4988,7 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto out_unbind;
err = il_dbgfs_register(il, DRV_NAME);
if (err)
IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
err);
il_dbgfs_register(il, DRV_NAME);
err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
if (err) {

View File

@ -2974,13 +2974,11 @@ il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
#endif /* CONFIG_IWLEGACY_DEBUG */
#ifdef CONFIG_IWLEGACY_DEBUGFS
int il_dbgfs_register(struct il_priv *il, const char *name);
void il_dbgfs_register(struct il_priv *il, const char *name);
void il_dbgfs_unregister(struct il_priv *il);
#else
static inline int
il_dbgfs_register(struct il_priv *il, const char *name)
static inline void il_dbgfs_register(struct il_priv *il, const char *name)
{
return 0;
}
static inline void

View File

@ -128,23 +128,12 @@ EXPORT_SYMBOL(il_update_stats);
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
if (!debugfs_create_file(#name, mode, parent, il, \
&il_dbgfs_##name##_ops)) \
goto err; \
debugfs_create_file(#name, mode, parent, il, \
&il_dbgfs_##name##_ops); \
} while (0)
#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
struct dentry *__tmp; \
__tmp = debugfs_create_bool(#name, 0600, parent, ptr); \
if (IS_ERR(__tmp) || !__tmp) \
goto err; \
} while (0)
#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
struct dentry *__tmp; \
__tmp = debugfs_create_x32(#name, 0600, parent, ptr); \
if (IS_ERR(__tmp) || !__tmp) \
goto err; \
debugfs_create_bool(#name, 0600, parent, ptr); \
} while (0)
/* file operation */
@ -1341,27 +1330,18 @@ DEBUGFS_WRITE_FILE_OPS(wd_timeout);
* Create the debugfs files and directories
*
*/
int
void
il_dbgfs_register(struct il_priv *il, const char *name)
{
struct dentry *phyd = il->hw->wiphy->debugfsdir;
struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
dir_drv = debugfs_create_dir(name, phyd);
if (!dir_drv)
return -ENOMEM;
il->debugfs_dir = dir_drv;
dir_data = debugfs_create_dir("data", dir_drv);
if (!dir_data)
goto err;
dir_rf = debugfs_create_dir("rf", dir_drv);
if (!dir_rf)
goto err;
dir_debug = debugfs_create_dir("debug", dir_drv);
if (!dir_debug)
goto err;
DEBUGFS_ADD_FILE(nvm, dir_data, 0400);
DEBUGFS_ADD_FILE(sram, dir_data, 0600);
@ -1399,12 +1379,6 @@ il_dbgfs_register(struct il_priv *il, const char *name)
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&il->disable_chain_noise_cal);
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
return 0;
err:
IL_ERR("Can't create the debugfs directory\n");
il_dbgfs_unregister(il);
return -ENOMEM;
}
EXPORT_SYMBOL(il_dbgfs_register);

View File

@ -83,6 +83,7 @@
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@ -104,6 +105,8 @@
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_CC_A_MODULE_FIRMWARE(api) \
IWL_CC_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@ -195,8 +198,8 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
const struct iwl_cfg iwl22560_2ax_cfg_hr = {
.name = "Intel(R) Wireless-AX 22560",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@ -207,6 +210,42 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22260_2ax_cfg = {
.name = "Intel(R) Wireless-AX 22260",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg killer1650x_2ax_cfg = {
.name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (22260NGW)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg killer1650w_2ax_cfg = {
.name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (22260D2W)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
/*
* All JF radio modules are part of the 9000 series, but the MAC part
* looks more like 22000. That's why this device is here, but called
@ -230,6 +269,12 @@ const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
};
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@ -242,6 +287,30 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@ -324,3 +393,4 @@ MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
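
As a worked example of how these macros compose a firmware file name (the API number below is illustrative, not the real IWL_22000_UCODE_API_MAX):

/* IWL_CC_A_MODULE_FIRMWARE(46)
 *   -> IWL_CC_A_FW_PRE __stringify(46) ".ucode"
 *   -> "iwlwifi-cc-a0-" "46" ".ucode"
 *   -> "iwlwifi-cc-a0-46.ucode"
 * MODULE_FIRMWARE() records that name so userspace can ship the blob. */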

View File

@ -73,21 +73,12 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
#define IWL9000A_MODULE_FIRMWARE(api) \
IWL9000A_FW_PRE __stringify(api) ".ucode"
#define IWL9000B_MODULE_FIRMWARE(api) \
IWL9000B_FW_PRE __stringify(api) ".ucode"
#define IWL9000RFB_MODULE_FIRMWARE(api) \
IWL9000RFB_FW_PRE __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
IWL9260A_FW_PRE __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
IWL9260B_FW_PRE __stringify(api) ".ucode"
#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE __stringify(api) ".ucode"
#define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@ -162,44 +153,43 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9260_2ac_160_cfg = {
.name = "Intel(R) Wireless-AC 9260 160MHz",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9260_killer_2ac_cfg = {
.name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9460_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -207,9 +197,7 @@ const struct iwl_cfg iwl9460_2ac_cfg_soc = {
const struct iwl_cfg iwl9461_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9461",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -217,9 +205,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_soc = {
const struct iwl_cfg iwl9462_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9462",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -227,16 +213,27 @@ const struct iwl_cfg iwl9462_2ac_cfg_soc = {
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9560_2ac_160_cfg = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_soc = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -244,9 +241,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -254,9 +249,7 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -264,9 +257,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -275,9 +266,7 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9461",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -286,9 +275,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9462",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -297,9 +284,16 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -308,9 +302,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@ -319,17 +311,12 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));

View File

@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
ccflags-y += -I$(src)/../
ccflags-y += -I $(srctree)/$(src)/../

View File

@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -710,24 +711,6 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
{
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
return false;
return true;
}
static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
{
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
return false;
if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
return true;
/* disabled by default */
return false;
}
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
@ -752,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
if (!iwl_enable_rx_ampdu(priv->cfg))
if (!iwl_enable_rx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Rx\n");
ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@ -764,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
if (!iwl_enable_tx_ampdu(priv->cfg))
if (!iwl_enable_tx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
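
The call sites above drop the cfg argument, so the two helpers removed at the top of this hunk presumably live on in shared code in an argument-less form. A sketch of what that looks like, assuming the same module-parameter logic as the deleted copies:

static inline bool iwl_enable_rx_ampdu(void)
{
	return !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG);
}

static inline bool iwl_enable_tx_ampdu(void)
{
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
		return false;
	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
		return true;
	return false;	/* disabled by default */
}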

View File

@ -1054,7 +1054,7 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
else
IWL_ERR(priv,
"Cannot request restart before registrating with mac80211\n");
"Cannot request restart before registering with mac80211\n");
} else {
WARN_ON(1);
}

View File

@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -325,9 +326,9 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
iwl_prepare_ct_kill_task(priv);
tt->state = old_state;
}
} else if (old_state == IWL_TI_CT_KILL &&
tt->state != IWL_TI_CT_KILL)
} else if (old_state == IWL_TI_CT_KILL) {
iwl_perform_ct_kill_task(priv, false);
}
IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
tt->state);
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",

View File

@ -77,7 +77,8 @@
* @DATA_PATH_GROUP: data path group, uses command IDs from
* &enum iwl_data_path_subcmd_ids
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
* @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids
* @LOCATION_GROUP: location group, uses command IDs from
* &enum iwl_location_subcmd_ids
* @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
* &enum iwl_prot_offload_subcmd_ids
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
@ -92,7 +93,7 @@ enum iwl_mvm_command_groups {
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
NAN_GROUP = 0x7,
TOF_GROUP = 0x8,
LOCATION_GROUP = 0x8,
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
@ -352,16 +353,6 @@ enum iwl_legacy_cmds {
*/
PHY_DB_CMD = 0x6c,
/**
* @TOF_CMD: &struct iwl_tof_config_cmd
*/
TOF_CMD = 0x10,
/**
* @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd
*/
TOF_NOTIFICATION = 0x11,
/**
* @POWER_TABLE_CMD: &struct iwl_device_power_cmd
*/
@ -415,7 +406,11 @@ enum iwl_legacy_cmds {
TX_ANT_CONFIGURATION_CMD = 0x98,
/**
* @STATISTICS_CMD: &struct iwl_statistics_cmd
* @STATISTICS_CMD:
* one of &struct iwl_statistics_cmd,
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics
*/
STATISTICS_CMD = 0x9c,
@ -423,7 +418,7 @@ enum iwl_legacy_cmds {
* @STATISTICS_NOTIFICATION:
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics_cdb
* &struct iwl_notif_statistics
*/
STATISTICS_NOTIFICATION = 0x9d,
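
Commands in a non-legacy group such as the renamed LOCATION_GROUP are addressed by a 16-bit "wide" ID combining group and opcode; roughly, using the WIDE_ID() helper found elsewhere in iwlwifi:

u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD);
/* (0x08 << 8) | 0x00 == 0x0800 */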

View File

@ -224,8 +224,18 @@ struct iwl_wowlan_pattern {
#define IWL_WOWLAN_MAX_PATTERNS 20
/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
*/
struct iwl_wowlan_patterns_cmd {
/**
* @n_patterns: number of patterns
*/
__le32 n_patterns;
/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
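
Because @patterns is a flexible array, the driver sizes the command at run time; a minimal sketch, with n_patterns capped at IWL_WOWLAN_MAX_PATTERNS:

size_t cmd_len = sizeof(struct iwl_wowlan_patterns_cmd) +
		 n_patterns * sizeof(struct iwl_wowlan_pattern);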

View File

@ -104,6 +104,12 @@ enum iwl_data_path_subcmd_ids {
*/
HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
/**
* @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI
* matrix collection, uses &struct iwl_channel_estimation_cfg
*/
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
* @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
*/
@ -156,4 +162,53 @@ struct iwl_mu_group_mgmt_notif {
__le32 user_position[4];
} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
enum iwl_channel_estimation_flags {
IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0),
IWL_CHANNEL_ESTIMATION_TIMER = BIT(1),
IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2),
};
/**
* struct iwl_channel_estimation_cfg - channel estimation reporting config
*/
struct iwl_channel_estimation_cfg {
/**
* @flags: flags, see &enum iwl_channel_estimation_flags
*/
__le32 flags;
/**
* @timer: if enabled via flags, automatically disable after this many
* microseconds
*/
__le32 timer;
/**
* @count: if enabled via flags, automatically disable after this many
* frames with channel estimation matrix were captured
*/
__le32 count;
/**
* @rate_n_flags_mask: only try to record the channel estimation matrix
* if the rate_n_flags value for the received frame (let's call
* that rx_rnf) matches the mask/value given here like this:
* (rx_rnf & rate_n_flags_mask) == rate_n_flags_val.
*/
__le32 rate_n_flags_mask;
/**
* @rate_n_flags_val: see @rate_n_flags_mask
*/
__le32 rate_n_flags_val;
/**
* @reserved: reserved (for alignment)
*/
__le32 reserved;
/**
* @frame_types: bitmap of frame types to capture, the received frame's
* subtype|type takes 6 bits in the frame and the corresponding bit
* in this field must be set to 1 to capture channel estimation for
* that frame type. Set to all-ones to enable capturing for all
* frame types.
*/
__le64 frame_types;
} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_datapath_h__ */
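
A minimal configuration sketch for the new CSI collection command: enable it unconditionally, match every rate, and capture every frame type (all values illustrative):

struct iwl_channel_estimation_cfg cfg = {
	.flags = cpu_to_le32(IWL_CHANNEL_ESTIMATION_ENABLE),
	/* (rx_rnf & 0) == 0 matches any rate_n_flags */
	.rate_n_flags_mask = cpu_to_le32(0),
	.rate_n_flags_val = cpu_to_le32(0),
	/* one bit per subtype|type value, all ones captures everything */
	.frame_types = cpu_to_le64(~0ULL),
};
/* sent to the firmware as CHEST_COLLECTOR_FILTER_CONFIG_CMD in DATA_PATH_GROUP */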

View File

@ -335,29 +335,11 @@ struct iwl_dbg_mem_access_rsp {
__le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
#define CONT_REC_COMMAND_SIZE 80
#define ENABLE_CONT_RECORDING 0x15
#define DISABLE_CONT_RECORDING 0x16
#define LDBG_CFG_COMMAND_SIZE 80
#define BUFFER_ALLOCATION 0x27
#define START_DEBUG_RECORDING 0x29
#define STOP_DEBUG_RECORDING 0x2A
/*
* struct iwl_continuous_record_mode - recording mode
*/
struct iwl_continuous_record_mode {
__le16 enable_recording;
} __packed;
/*
* struct iwl_continuous_record_cmd - enable/disable continuous recording
*/
struct iwl_continuous_record_cmd {
struct iwl_continuous_record_mode record_mode;
u8 pad[CONT_REC_COMMAND_SIZE -
sizeof(struct iwl_continuous_record_mode)];
} __packed;
/* maximum fragments to be allocated per target of allocationId */
#define IWL_BUFFER_LOCATION_MAX_FRAGS 2
@ -385,4 +367,17 @@ struct iwl_buffer_allocation_cmd {
struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */
/**
* struct iwl_ldbg_config_cmd - LDBG config command
* @type: configuration type
* @pad: reserved space for type-dependent data
*/
struct iwl_ldbg_config_cmd {
__le32 type;
union {
u8 pad[LDBG_CFG_COMMAND_SIZE - sizeof(__le32)];
struct iwl_buffer_allocation_cmd buffer_allocation;
}; /* LDBG_CFG_BODY_API_U_VER_2 (partially) */
} __packed; /* LDBG_CFG_CMD_API_S_VER_2 */
#endif /* __iwl_fw_api_debug_h__ */
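
The union keeps the command at the fixed 80-byte LDBG_CFG_COMMAND_SIZE whichever body is used; a sketch of a buffer-allocation request (only the type field shown, the rest stays zero padding):

struct iwl_ldbg_config_cmd cmd = {
	.type = cpu_to_le32(BUFFER_ALLOCATION),
};
/* cmd.buffer_allocation is then filled with the DMA fragment
 * addresses/sizes before the command is sent to the firmware. */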

View File

@ -0,0 +1,711 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
/**
* enum iwl_location_subcmd_ids - location group command IDs
*/
enum iwl_location_subcmd_ids {
/**
* @TOF_RANGE_REQ_CMD: TOF ranging request,
* uses &struct iwl_tof_range_req_cmd
*/
TOF_RANGE_REQ_CMD = 0x0,
/**
* @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd
*/
TOF_CONFIG_CMD = 0x1,
/**
* @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses
* &struct iwl_tof_range_abort_cmd
*/
TOF_RANGE_ABORT_CMD = 0x2,
/**
* @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config,
* uses &struct iwl_tof_range_request_ext_cmd
*/
TOF_RANGE_REQ_EXT_CMD = 0x3,
/**
* @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration,
* uses &struct iwl_tof_responder_config_cmd
*/
TOF_RESPONDER_CONFIG_CMD = 0x4,
/**
* @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration,
* uses &struct iwl_tof_responder_dyn_config_cmd
*/
TOF_RESPONDER_DYN_CONFIG_CMD = 0x5,
/**
* @CSI_HEADER_NOTIFICATION: CSI header
*/
CSI_HEADER_NOTIFICATION = 0xFA,
/**
* @CSI_CHUNKS_NOTIFICATION: CSI chunk,
* uses &struct iwl_csi_chunk_notification
*/
CSI_CHUNKS_NOTIFICATION = 0xFB,
/**
* @TOF_LC_NOTIF: used for LCI/civic location, contains just
* the action frame
*/
TOF_LC_NOTIF = 0xFC,
/**
* @TOF_RESPONDER_STATS: FTM responder statistics notification,
* uses &struct iwl_ftm_responder_stats
*/
TOF_RESPONDER_STATS = 0xFD,
/**
* @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses
* &struct iwl_tof_mcsi_notif
*/
TOF_MCSI_DEBUG_NOTIF = 0xFE,
/**
* @TOF_RANGE_RESPONSE_NOTIF: ranging response, using
* &struct iwl_tof_range_rsp_ntfy
*/
TOF_RANGE_RESPONSE_NOTIF = 0xFF,
};
/**
* struct iwl_tof_config_cmd - ToF configuration
* @tof_disabled: indicates if ToF is disabled (or not)
* @one_sided_disabled: indicates if one-sided is disabled (or not)
* @is_debug_mode: indicates if debug mode is active
* @is_buf_required: indicates if channel estimation buffer is required
*/
struct iwl_tof_config_cmd {
u8 tof_disabled;
u8 one_sided_disabled;
u8 is_debug_mode;
u8 is_buf_required;
} __packed;
/**
* enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth
* @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT
* @IWL_TOF_BW_20_HT: 20 MHz HT
* @IWL_TOF_BW_40: 40 MHz
* @IWL_TOF_BW_80: 80 MHz
* @IWL_TOF_BW_160: 160 MHz
*/
enum iwl_tof_bandwidth {
IWL_TOF_BW_20_LEGACY,
IWL_TOF_BW_20_HT,
IWL_TOF_BW_40,
IWL_TOF_BW_80,
IWL_TOF_BW_160,
}; /* LOCAT_BW_TYPE_E */
/*
* enum iwl_tof_algo_type - Algorithm type for range measurement request
*/
enum iwl_tof_algo_type {
IWL_TOF_ALGO_TYPE_MAX_LIKE = 0,
IWL_TOF_ALGO_TYPE_LINEAR_REG = 1,
IWL_TOF_ALGO_TYPE_FFT = 2,
/* Keep last */
IWL_TOF_ALGO_TYPE_INVALID,
}; /* ALGO_TYPE_E */
/*
* enum iwl_tof_mcsi_ntfy - Enable/Disable MCSI notifications
*/
enum iwl_tof_mcsi_enable {
IWL_TOF_MCSI_DISABLED = 0,
IWL_TOF_MCSI_ENABLED = 1,
}; /* MCSI_ENABLE_E */
/**
* enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg
* @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid
* @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid
* @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid
* @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: specific calibration mode is
* valid
* @IWL_TOF_RESPONDER_CMD_VALID_BSSID: BSSID is valid
* @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid
* @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid
* @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid
* @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report
* support is valid
* @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support
* is valid
* @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support
* is valid
* @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure
* is valid
* @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid
*/
enum iwl_tof_responder_cmd_valid_field {
IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0),
IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1),
IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2),
IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3),
IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4),
IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5),
IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6),
IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7),
IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8),
IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9),
IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10),
IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11),
IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12),
};
/**
* enum iwl_tof_responder_cfg_flags - responder configuration flags
* @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support
* @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics
* @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI
* @IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type
* @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode
* @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode
* @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode
* @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support
* @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail
* @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask
*/
enum iwl_tof_responder_cfg_flags {
IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0),
IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1),
IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2),
IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5),
IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6),
IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7),
IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8),
IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9),
IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10),
IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK,
};
/**
* struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
* @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
* @responder_cfg_flags: &iwl_tof_responder_cfg_flags
* @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
* @rate: current AP rate
* @channel_num: current AP Channel
* @ctrl_ch_position: coding of the control channel position relative to
* the center frequency, see iwl_mvm_get_ctrl_pos()
* @sta_id: index of the AP STA when in AP mode
* @reserved1: reserved
* @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
* purposes, simulating station movement by adding various values
* to this field
* @common_calib: XVT: common calibration value
* @specific_calib: XVT: specific calibration value
* @bssid: Current AP BSSID
* @reserved2: reserved
*/
struct iwl_tof_responder_config_cmd {
__le32 cmd_valid_fields;
__le32 responder_cfg_flags;
u8 bandwidth;
u8 rate;
u8 channel_num;
u8 ctrl_ch_position;
u8 sta_id;
u8 reserved1;
__le16 toa_offset;
__le16 common_calib;
__le16 specific_calib;
u8 bssid[ETH_ALEN];
__le16 reserved2;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
* struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
* @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
* @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
* @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if
* needed, 0-padding such that the next part is dword-aligned, then CIVIC
* data (if exists) follows, and then 0-padding again to complete a
* 4-multiple long buffer.
*/
struct iwl_tof_responder_dyn_config_cmd {
__le32 lci_len;
__le32 civic_len;
u8 lci_civic[];
} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */
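
A worked example of the @lci_civic layout described above, taking lci_len = 5 and civic_len = 8:

/*   bytes 0..4    LCI data
 *   bytes 5..7    zero padding up to the next dword boundary
 *   bytes 8..15   CIVIC data (already a multiple of 4, no tail padding)
 * so the variable part of the command is
 *   ALIGN(5, 4) + ALIGN(8, 4) = 8 + 8 = 16 bytes long. */
size_t tail_len = ALIGN(lci_len, 4) + ALIGN(civic_len, 4);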
/**
* struct iwl_tof_range_request_ext_cmd - extended range req for WLS
* @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
* @reserved: reserved
* @min_delta_ftm: Minimal time between two consecutive measurements,
* in units of 100us. 0 means no preference by station
* @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
* value be sent to the AP
* @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
* value to be sent to the AP
* @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
* value to be sent to the AP
*/
struct iwl_tof_range_req_ext_cmd {
__le16 tsf_timer_offset_msec;
__le16 reserved;
u8 min_delta_ftm;
u8 ftm_format_and_bw20M;
u8 ftm_format_and_bw40M;
u8 ftm_format_and_bw80M;
} __packed;
/**
* enum iwl_tof_location_query - values for query bitmap
* @IWL_TOF_LOC_LCI: query LCI
* @IWL_TOF_LOC_CIVIC: query civic
*/
enum iwl_tof_location_query {
IWL_TOF_LOC_LCI = 0x01,
IWL_TOF_LOC_CIVIC = 0x02,
};
/**
* struct iwl_tof_range_req_ap_entry - AP configuration parameters
* @channel_num: Current AP Channel
* @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth.
* @tsf_delta_direction: TSF relatively to the subject AP
* @ctrl_ch_position: Coding of the control channel position relative to the
* center frequency, see iwl_mvm_get_ctrl_pos().
* @bssid: AP's BSSID
* @measure_type: Measurement type: 0 - two sided, 1 - One sided
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
* number of measurement iterations (min 2^0 = 1, max 2^14)
* @burst_period: Recommended value to be sent to the AP. Measurement
* periodicity In units of 100ms. ignored if num_of_bursts = 0
* @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31);
* 1-sided: how many rts/cts pairs should be used per burst.
* @retries_per_sample: Max number of retries that the LMAC should send
* in case of no replies by the AP.
* @tsf_delta: TSF Delta in units of microseconds.
* The difference between the AP TSF and the device local clock.
* @location_req: Location Request Bit[0] LCI should be sent in the FTMR;
* Bit[1] Civic should be sent in the FTMR
* @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
* @enable_dyn_ack: Enable Dynamic ACK BW.
* 0: Initiator interact with regular AP;
* 1: Initiator interact with Responder machine: need to send the
* Initiator Acks with HT 40MHz / 80MHz, since the Responder should
* use it for its ch est measurement (this flag will be set when we
* configure the opposite machine to be Responder).
* @rssi: Last received value
* legal values: -128-0 (0x7f). above 0x0 indicating an invalid value.
* @algo_type: &enum iwl_tof_algo_type
* @notify_mcsi: &enum iwl_tof_mcsi_ntfy.
* @reserved: For alignment and future use
*/
struct iwl_tof_range_req_ap_entry {
u8 channel_num;
u8 bandwidth;
u8 tsf_delta_direction;
u8 ctrl_ch_position;
u8 bssid[ETH_ALEN];
u8 measure_type;
u8 num_of_bursts;
__le16 burst_period;
u8 samples_per_burst;
u8 retries_per_sample;
__le32 tsf_delta;
u8 location_req;
u8 asap_mode;
u8 enable_dyn_ack;
s8 rssi;
u8 algo_type;
u8 notify_mcsi;
__le16 reserved;
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
* @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon
* timeout expiration
* @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the
* earlier of: measurements completion / timeout
* expiration.
*/
enum iwl_tof_response_mode {
IWL_MVM_TOF_RESPONSE_ASAP,
IWL_MVM_TOF_RESPONSE_TIMEOUT,
IWL_MVM_TOF_RESPONSE_COMPLETE,
};
/**
* enum iwl_tof_initiator_flags
*
* @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run
* the algo on ant A+B, instead of only one of them.
* @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX
* @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX
* @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX
* @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A for TX ACKs during FTM
* @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B for TX ACKs during FTM
* @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C for TX ACKs during FTM
* @IWL_TOF_INITIATOR_FLAGS_MINDELTA_NO_PREF: no preference for minDeltaFTM
*/
enum iwl_tof_initiator_flags {
IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0),
IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1),
IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2),
IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3),
IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4),
IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5),
IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6),
IWL_TOF_INITIATOR_FLAGS_MINDELTA_NO_PREF = BIT(7),
}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
#define IWL_MVM_TOF_MAX_APS 5
#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see &enum iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @initiator: 0- NW initiated, 1 - Client Initiated
* @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
* '1' - run ML-Algo for ToF only
* @req_timeout: Requested timeout of the response in units of 100ms.
* This is equivalent to the session time configured to the
* LMAC in Initiator Request
* @report_policy: Only partially supported for this release: the range report
* will be uploaded as a batch when ready or when the session is done
* (successfully / partially). One of &enum iwl_tof_response_mode.
* @reserved0: reserved
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
* '1' Use MAC Address randomization according to the below
* @range_req_bssid: ranging request BSSID
* @macaddr_template: MAC address template to use for non-randomized bits
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
* @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT)
* @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT)
* @common_calib: The common calib value to inject to this measurement calc
* @specific_calib: The specific calib value to inject to this measurement calc
* @ap: per-AP request data
*/
struct iwl_tof_range_req_cmd {
__le32 initiator_flags;
u8 request_id;
u8 initiator;
u8 one_sided_los_disable;
u8 req_timeout;
u8 report_policy;
u8 reserved0;
u8 num_of_ap;
u8 macaddr_random;
u8 range_req_bssid[ETH_ALEN];
u8 macaddr_template[ETH_ALEN];
u8 macaddr_mask[ETH_ALEN];
u8 ftm_rx_chains;
u8 ftm_tx_chains;
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
} __packed;
/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
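
To make the request layout concrete, here is a minimal sketch of how a caller might fill this command for a single two-sided measurement with full MAC address randomization. It is illustrative only: the helper name and the chosen values are not taken from the driver, and the usual kernel headers (string/byteorder helpers) are assumed.

/* Illustrative sketch only: values follow the kerneldoc above. */
static void build_range_req(struct iwl_tof_range_req_cmd *cmd,
			    const u8 *ap_bssid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->request_id = 1;		/* echoed back in the range response */
	cmd->req_timeout = 10;		/* 10 * 100ms = 1s session timeout */
	cmd->num_of_ap = 1;		/* must not exceed IWL_MVM_TOF_MAX_APS */
	cmd->macaddr_random = 1;	/* randomize the source MAC address */
	memset(cmd->macaddr_mask, 0xff, ETH_ALEN); /* all bits randomized by the UMAC */

	memcpy(cmd->ap[0].bssid, ap_bssid, ETH_ALEN);
	cmd->ap[0].measure_type = 0;		/* two sided */
	cmd->ap[0].num_of_bursts = 2;		/* 2^2 = 4 bursts */
	cmd->ap[0].samples_per_burst = 5;	/* 5 FTM pairs per burst */
	cmd->ap[0].location_req = IWL_TOF_LOC_LCI | IWL_TOF_LOC_CIVIC;
}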
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS - FW successfully received the
* request
* @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the
* sent request will not be handled
*/
enum iwl_tof_range_request_status {
IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS,
IWL_TOF_RANGE_REQUEST_STATUS_BUSY,
};
/**
* enum iwl_tof_entry_status
*
* @IWL_TOF_ENTRY_SUCCESS: successful measurement.
* @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure.
* @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request.
* @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request.
* @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet.
* @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no
* measurement was completed.
* @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due to inability to switch
* from the primary channel.
* @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM.
* @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown
* reason.
* @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid
* T1/T4.
* @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame
* structure.
* @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled.
* @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the
* initiator for some period, period supplied in @refusal_period.
* @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments.
* @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled.
* @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: Responder overrode the original
* parameters within the current session.
*/
enum iwl_tof_entry_status {
IWL_TOF_ENTRY_SUCCESS = 0,
IWL_TOF_ENTRY_GENERAL_FAILURE = 1,
IWL_TOF_ENTRY_NO_RESPONSE = 2,
IWL_TOF_ENTRY_REQUEST_REJECTED = 3,
IWL_TOF_ENTRY_NOT_SCHEDULED = 4,
IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5,
IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6,
IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7,
IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8,
IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9,
IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10,
IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11,
IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12,
IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13,
IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14,
IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15,
}; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */
/**
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
* @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @rtt: The Round Trip Time of the last measurement for the
* current AP [pSec]
* @rtt_variance: The Variance of the RTT values measured for current AP
* @rtt_spread: The Difference between the maximum and the minimum RTT
* values measured for current AP in the current session [pSec]
* @rssi: RSSI as uploaded in the Channel Estimation notification
* @rssi_spread: The Difference between the maximum and the minimum RSSI values
* measured for current AP in the current session
* @reserved: reserved
* @refusal_period: refusal period in case of
* @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
* @range: Measured range [cm]
* @range_variance: Measured range variance [cm]
* @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
* uploaded by the LMAC
* @t2t3_initiator: as calculated from the algo in the initiator
* @t1t4_responder: as calculated from the algo in the responder
* @common_calib: Calibration value that was used for this AP measurement
* @specific_calib: Specific calibration value that was used for this AP measurement
* @papd_calib_output: The result of the tof papd calibration that was injected
* into the algorithm.
*/
struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
__le32 rtt;
__le32 rtt_variance;
__le32 rtt_spread;
s8 rssi;
u8 rssi_spread;
u8 reserved;
u8 refusal_period;
__le32 range;
__le32 range_variance;
__le32 timestamp;
__le32 t2t3_initiator;
__le32 t1t4_responder;
__le16 common_calib;
__le16 specific_calib;
__le32 papd_calib_output;
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
/**
* enum iwl_tof_response_status - tof response status
*
* @IWL_TOF_RESPONSE_SUCCESS: successful range.
* @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration.
* partial result of ranges done so far is included in the response.
* @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command.
* @IWL_TOF_RESPONSE_FAILED: Measurement request command failed.
*/
enum iwl_tof_response_status {
IWL_TOF_RESPONSE_SUCCESS = 0,
IWL_TOF_RESPONSE_TIMEOUT = 1,
IWL_TOF_RESPONSE_ABORTED = 4,
IWL_TOF_RESPONSE_FAILED = 5,
}; /* LOCATION_RNG_RSP_STATUS */
/**
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @request_status: status of current measurement session, one of
* &enum iwl_tof_response_status.
* @last_in_batch: report policy (when not all responses are uploaded at once)
* @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @ap: per-AP data
*/
struct iwl_tof_range_rsp_ntfy {
u8 request_id;
u8 request_status;
u8 last_in_batch;
u8 num_of_aps;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
} __packed;
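
A minimal sketch of how a notification handler might walk this response; the function name, the filtering, and the debug print are illustrative, not driver code. Note that rtt is reported in picoseconds and range in centimeters, per the kerneldoc above.

static void handle_range_rsp(struct iwl_tof_range_rsp_ntfy *rsp)
{
	int i;

	for (i = 0; i < rsp->num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
		struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &rsp->ap[i];

		if (ap->measure_status != IWL_TOF_ENTRY_SUCCESS)
			continue;

		pr_debug("%pM: rtt %u psec, range %u cm\n", ap->bssid,
			 le32_to_cpu(ap->rtt), le32_to_cpu(ap->range));
	}
}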
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
* struct iwl_tof_mcsi_notif - used for debug
* @token: token ID for the current session
* @role: '0' - initiator, '1' - responder
* @reserved: reserved
* @initiator_bssid: initiator machine
* @responder_bssid: responder machine
* @mcsi_buffer: debug data
*/
struct iwl_tof_mcsi_notif {
u8 token;
u8 role;
__le16 reserved;
u8 initiator_bssid[ETH_ALEN];
u8 responder_bssid[ETH_ALEN];
u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
} __packed;
/**
* struct iwl_tof_range_abort_cmd - abort an ongoing measurement session
* @request_id: corresponds to a range request
* @reserved: reserved
*/
struct iwl_tof_range_abort_cmd {
u8 request_id;
u8 reserved[3];
} __packed;
enum ftm_responder_stats_flags {
FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0),
FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1),
FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2),
FTM_RESP_STAT_TRIGGER_DUP = BIT(3),
FTM_RESP_STAT_DUP = BIT(4),
FTM_RESP_STAT_DUP_IN_WIN = BIT(5),
FTM_RESP_STAT_DUP_OUT_WIN = BIT(6),
FTM_RESP_STAT_SCHED_SUCCESS = BIT(7),
FTM_RESP_STAT_ASAP_REQ = BIT(8),
FTM_RESP_STAT_NON_ASAP_REQ = BIT(9),
FTM_RESP_STAT_ASAP_RESP = BIT(10),
FTM_RESP_STAT_NON_ASAP_RESP = BIT(11),
FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12),
FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13),
FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14),
FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15),
FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16),
FTM_RESP_STAT_FAIL_GC = BIT(17),
FTM_RESP_STAT_SUCCESS = BIT(18),
FTM_RESP_STAT_INTEL_IE = BIT(19),
FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20),
FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21),
FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22),
FTM_RESP_STAT_PROCESS_FAIL = BIT(23),
FTM_RESP_STAT_ACK = BIT(24),
FTM_RESP_STAT_NACK = BIT(25),
FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26),
FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27),
FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28),
FTM_RESP_STAT_INITIATOR_ADDED = BIT(29),
FTM_RESP_STAT_ERR_LIST_FULL = BIT(30),
FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31),
}; /* RESP_IND_E */
/**
* struct iwl_ftm_responder_stats - FTM responder statistics
* @addr: initiator address
* @success_ftm: number of successful ftm frames
* @ftm_per_burst: num of FTM frames that were received
* @flags: &enum ftm_responder_stats_flags
* @duration: actual duration of FTM
* @allocated_duration: time that was allocated for this FTM session
* @bw: FTM request bandwidth
* @rate: FTM request rate
* @reserved: for alignment and future use
*/
struct iwl_ftm_responder_stats {
u8 addr[ETH_ALEN];
u8 success_ftm;
u8 ftm_per_burst;
__le32 flags;
__le32 duration;
__le32 allocated_duration;
u8 bw;
u8 rate;
__le16 reserved;
} __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */
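
The flags field is a bitmap of the ftm_responder_stats_flags values above. A short sketch of testing it; the helper name and the specific bits checked are illustrative only.

static bool ftm_session_succeeded(const struct iwl_ftm_responder_stats *stats)
{
	u32 flags = le32_to_cpu(stats->flags);

	return (flags & FTM_RESP_STAT_SUCCESS) &&
	       !(flags & FTM_RESP_STAT_FAIL_TRIGGER_ERR);
}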
#define IWL_CSI_CHUNK_CTL_NUM_MASK 0x3
#define IWL_CSI_CHUNK_CTL_IDX_MASK 0xc
struct iwl_csi_chunk_notification {
__le32 token;
__le16 seq;
__le16 ctl;
__le32 size;
u8 data[];
} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1 */
#endif /* __iwl_fw_api_location_h__ */
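
The two CTL masks above split the 16-bit ctl word of the CSI chunk notification into two subfields. A minimal decoding sketch follows; the interpretation of the subfields (total number of chunks vs. index of this chunk) is an assumption based on the mask names, and the helper is not driver code.

static void handle_csi_chunk(const struct iwl_csi_chunk_notification *notif)
{
	u16 ctl = le16_to_cpu(notif->ctl);
	unsigned int num = ctl & IWL_CSI_CHUNK_CTL_NUM_MASK;	     /* assumed: chunk count */
	unsigned int idx = (ctl & IWL_CSI_CHUNK_CTL_IDX_MASK) >> 2; /* assumed: chunk index */

	pr_debug("CSI token %u: chunk %u/%u, %u bytes\n",
		 le32_to_cpu(notif->token), idx, num,
		 le32_to_cpu(notif->size));
	/* notif->data holds the chunk payload (size bytes) */
}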

View File

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -95,17 +97,36 @@
#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
/*
* struct iwl_fw_channel_info_v1 - channel information
*
* @band: PHY_BAND_*
* @channel: channel number
* @width: PHY_[VHT|LEGACY]_CHANNEL_*
* @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
*/
struct iwl_fw_channel_info {
struct iwl_fw_channel_info_v1 {
u8 band;
u8 channel;
u8 width;
u8 ctrl_pos;
} __packed;
} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
/*
* struct iwl_fw_channel_info - channel information
*
* @channel: channel number
* @band: PHY_BAND_*
* @width: PHY_[VHT|LEGACY]_CHANNEL_*
* @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
* @reserved: for future use and alignment
*/
struct iwl_fw_channel_info {
__le32 channel;
u8 band;
u8 width;
u8 ctrl_pos;
u8 reserved;
} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
@ -133,6 +154,22 @@ struct iwl_fw_channel_info {
#define NUM_PHY_CTX 3
/* TODO: complete missing documentation */
/**
* struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with
* various channel structures.
*
* @txchain_info: ???
* @rxchain_info: ???
* @acquisition_data: ???
* @dsp_cfg_flags: set to 0
*/
struct iwl_phy_context_cmd_tail {
__le32 txchain_info;
__le32 rxchain_info;
__le32 acquisition_data;
__le32 dsp_cfg_flags;
} __packed;
/**
* struct iwl_phy_context_cmd - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
@ -142,10 +179,7 @@ struct iwl_fw_channel_info {
* other value means apply new params after X usecs
* @tx_param_color: ???
* @ci: channel info
* @txchain_info: ???
* @rxchain_info: ???
* @acquisition_data: ???
* @dsp_cfg_flags: set to 0
* @tail: command tail
*/
struct iwl_phy_context_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@ -155,10 +189,7 @@ struct iwl_phy_context_cmd {
__le32 apply_time;
__le32 tx_param_color;
struct iwl_fw_channel_info ci;
__le32 txchain_info;
__le32 rxchain_info;
__le32 acquisition_data;
__le32 dsp_cfg_flags;
struct iwl_phy_context_cmd_tail tail;
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
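
Splitting the trailing fields into iwl_phy_context_cmd_tail lets the same tail follow either the old 4-byte iwl_fw_channel_info_v1 or the new 8-byte iwl_fw_channel_info, depending on which layout the firmware expects. A hedged sketch of how a driver might locate the tail at run time; the helper name and the version flag are assumptions, not driver code.

static struct iwl_phy_context_cmd_tail *
phy_ctxt_cmd_tail(struct iwl_phy_context_cmd *cmd, bool wide_channel_info)
{
	void *after_ci = (void *)&cmd->ci;

	after_ci += wide_channel_info ? sizeof(struct iwl_fw_channel_info) :
					sizeof(struct iwl_fw_channel_info_v1);
	return after_ci;	/* tail starts right after the channel info */
}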

View File

@ -209,8 +209,6 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask
* @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift
* @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status
* @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2
*/
enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
@ -238,8 +236,6 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
};
/* 9000 series API */
@ -337,6 +333,8 @@ enum iwl_rx_mpdu_phy_info {
IWL_RX_MPDU_PHY_AMPDU = BIT(5),
IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6),
IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7),
/* short preamble is only for CCK, for non-CCK overridden by this */
IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7),
IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8),
};
@ -723,6 +721,9 @@ struct iwl_rx_mpdu_desc {
#define RX_NO_DATA_FRAME_TIME_POS 0
#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS)
#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000
#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000
/**
* struct iwl_rx_no_data - RX no data descriptor
* @info: 7:0 frame type, 15:8 RX error type
@ -743,7 +744,7 @@ struct iwl_rx_no_data {
__le32 fr_time;
__le32 rate;
__le32 phy_info[2];
__le32 rx_vec[3];
__le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
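
The two new rx_vec[0] masks carry the NSTS (number of space-time streams) field of the received no-data frame for HE and VHT respectively. A small sketch of extracting it with FIELD_GET; which frame type selects which mask is glossed over here, and the helper is illustrative only.

#include <linux/bitfield.h>

static u8 rx_no_data_nsts(const struct iwl_rx_no_data *desc, bool is_he)
{
	u32 rx_vec0 = le32_to_cpu(desc->rx_vec[0]);

	return is_he ? FIELD_GET(RX_NO_DATA_RX_VEC0_HE_NSTS_MSK, rx_vec0) :
		       FIELD_GET(RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK, rx_vec0);
}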
/**

View File

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -29,6 +30,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -363,14 +365,7 @@ struct mvm_statistics_general_v8 {
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
struct mvm_statistics_general_cdb_v9 {
struct mvm_statistics_general_common_v19 common;
__le32 beacon_counter[NUM_MAC_INDEX_CDB];
u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */
struct mvm_statistics_general_cdb {
struct mvm_statistics_general {
struct mvm_statistics_general_common common;
__le32 beacon_counter[MAC_INDEX_AUX];
u8 beacon_average_energy[MAC_INDEX_AUX];
@ -435,11 +430,11 @@ struct iwl_notif_statistics_v11 {
struct mvm_statistics_load_v1 load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
struct iwl_notif_statistics_cdb {
struct iwl_notif_statistics {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general_cdb general;
struct mvm_statistics_general general;
struct mvm_statistics_load load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_13 */

View File

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -110,6 +112,17 @@ struct iwl_tdls_channel_switch_frame {
u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
/**
* struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd
*
* @timing: timing related data for command
* @frame: channel-switch request/response template, depending on switch_type
*/
struct iwl_tdls_channel_switch_cmd_tail {
struct iwl_tdls_channel_switch_timing timing;
struct iwl_tdls_channel_switch_frame frame;
} __packed;
/**
* struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
*
@ -119,15 +132,13 @@ struct iwl_tdls_channel_switch_frame {
* @switch_type: see &enum iwl_tdls_channel_switch_type
* @peer_sta_id: station id of TDLS peer
* @ci: channel we switch to
* @timing: timing related data for command
* @frame: channel-switch request/response template, depending on switch_type
* @tail: command tail
*/
struct iwl_tdls_channel_switch_cmd {
u8 switch_type;
__le32 peer_sta_id;
struct iwl_fw_channel_info ci;
struct iwl_tdls_channel_switch_timing timing;
struct iwl_tdls_channel_switch_frame frame;
struct iwl_tdls_channel_switch_cmd_tail tail;
} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
/**

View File

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -317,6 +319,25 @@ struct iwl_time_event_notif {
__le32 status;
} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
/*
* struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req
*
* @node_addr: Our MAC Address
* @reserved: reserved for alignment
* @apply_time: GP2 value to start (should always be the current GP2 value)
* @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
* time by which start of the event is allowed to be postponed.
* @duration: event duration in TU. To calculate event duration:
* timeEventDuration = min(duration, remainingQuota)
*/
struct iwl_hs20_roc_req_tail {
u8 node_addr[ETH_ALEN];
__le16 reserved;
__le32 apply_time;
__le32 apply_time_max_delay;
__le32 duration;
} __packed;
/*
* Aux ROC command
*
@ -336,13 +357,6 @@ struct iwl_time_event_notif {
* @sta_id_and_color: station id and color, resumed during "Remain On Channel"
* activity.
* @channel_info: channel info
* @node_addr: Our MAC Address
* @reserved: reserved for alignment
* @apply_time: GP2 value to start (should always be the current GP2 value)
* @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
* time by which start of the event is allowed to be postponed.
* @duration: event duration in TU. To calculate event duration:
* timeEventDuration = min(duration, remainingQuota)
*/
struct iwl_hs20_roc_req {
/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
@ -351,11 +365,7 @@ struct iwl_hs20_roc_req {
__le32 event_unique_id;
__le32 sta_id_and_color;
struct iwl_fw_channel_info channel_info;
u8 node_addr[ETH_ALEN];
__le16 reserved;
__le32 apply_time;
__le32 apply_time_max_delay;
__le32 duration;
struct iwl_hs20_roc_req_tail tail;
} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
/*

View File

@ -1,393 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_fw_api_tof_h__
#define __iwl_fw_api_tof_h__
/* ToF sub-group command IDs */
enum iwl_mvm_tof_sub_grp_ids {
TOF_RANGE_REQ_CMD = 0x1,
TOF_CONFIG_CMD = 0x2,
TOF_RANGE_ABORT_CMD = 0x3,
TOF_RANGE_REQ_EXT_CMD = 0x4,
TOF_RESPONDER_CONFIG_CMD = 0x5,
TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
TOF_RANGE_RESPONSE_NOTIF = 0xFE,
TOF_MCSI_DEBUG_NOTIF = 0xFB,
};
/**
* struct iwl_tof_config_cmd - ToF configuration
* @tof_disabled: 0 - enabled, 1 - disabled
* @one_sided_disabled: 0 - enabled, 1 - disabled
* @is_debug_mode: 1 - debug mode, 0 - otherwise
* @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
*/
struct iwl_tof_config_cmd {
__le32 sub_grp_cmd_id;
u8 tof_disabled;
u8 one_sided_disabled;
u8 is_debug_mode;
u8 is_buf_required;
} __packed;
/**
* struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
* @burst_period: future use: (currently hard coded in the LMAC)
* The interval between two sequential bursts.
* @min_delta_ftm: future use: (currently hard coded in the LMAC)
* The minimum delay between two sequential FTM Responses
* in the same burst.
* @burst_duration: future use: (currently hard coded in the LMAC)
* The total time for all FTMs handshake in the same burst.
* Affect the time events duration in the LMAC.
* @num_of_burst_exp: future use: (currently hard coded in the LMAC)
* The number of bursts for the current ToF request. Affect
* the number of events allocations in the current iteration.
* @get_ch_est: for xVT only, NA for driver
* @abort_responder: when set to '1' - Responder will terminate its activity
* (all other fields in the command are ignored)
* @recv_sta_req_params: 1 - Responder will ignore the other Responder's
* params and use the recommended Initiator params.
* 0 - otherwise
* @channel_num: current AP Channel
* @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @rate: current AP rate
* @ctrl_ch_position: coding of the control channel position relative to
* the center frequency:
*
* 40 MHz
* 0 below center, 1 above center
*
* 80 MHz
* bits [0..1]
* * 0 the near 20MHz to the center,
* * 1 the far 20MHz to the center
* bit[2]
* as above 40MHz
* @ftm_per_burst: FTMs per Burst
* @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
* '1' - we measure over the Initial FTM Response
* @asap_mode: ASAP / Non ASAP mode for the current WLS station
* @sta_id: index of the AP STA when in AP mode
* @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
* @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
* purposes, simulating station movement by adding various values
* to this field
* @bssid: Current AP BSSID
*/
struct iwl_tof_responder_config_cmd {
__le32 sub_grp_cmd_id;
__le16 burst_period;
u8 min_delta_ftm;
u8 burst_duration;
u8 num_of_burst_exp;
u8 get_ch_est;
u8 abort_responder;
u8 recv_sta_req_params;
u8 channel_num;
u8 bandwidth;
u8 rate;
u8 ctrl_ch_position;
u8 ftm_per_burst;
u8 ftm_resp_ts_avail;
u8 asap_mode;
u8 sta_id;
__le16 tsf_timer_offset_msecs;
__le16 toa_offset;
u8 bssid[ETH_ALEN];
} __packed;
/**
* struct iwl_tof_range_request_ext_cmd - extended range req for WLS
* @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
* @reserved: reserved
* @min_delta_ftm: Minimal time between two consecutive measurements,
* in units of 100us. 0 means no preference by station
* @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
* value be sent to the AP
* @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
* value to be sent to the AP
* @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
* value to be sent to the AP
*/
struct iwl_tof_range_req_ext_cmd {
__le32 sub_grp_cmd_id;
__le16 tsf_timer_offset_msec;
__le16 reserved;
u8 min_delta_ftm;
u8 ftm_format_and_bw20M;
u8 ftm_format_and_bw40M;
u8 ftm_format_and_bw80M;
} __packed;
#define IWL_MVM_TOF_MAX_APS 21
/**
* struct iwl_tof_range_req_ap_entry - AP configuration parameters
* @channel_num: Current AP Channel
* @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @tsf_delta_direction: TSF relatively to the subject AP
* @ctrl_ch_position: Coding of the control channel position relative to the
* center frequency.
* 40MHz 0 below center, 1 above center
* 80MHz bits [0..1]: 0 the near 20MHz to the center,
* 1 the far 20MHz to the center
* bit[2] as above 40MHz
* @bssid: AP's bss id
* @measure_type: Measurement type: 0 - two sided, 1 - One sided
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
* number of measurement iterations (min 2^0 = 1, max 2^14)
* @burst_period: Recommended value to be sent to the AP. Measurement
* periodicity In units of 100ms. ignored if num_of_bursts = 0
* @samples_per_burst: 2-sided: the number of FTM pairs in a single burst (1-31)
* 1-sided: how many RTS/CTS pairs should be used per burst.
* @retries_per_sample: Max number of retries that the LMAC should send
* in case of no replies by the AP.
* @tsf_delta: TSF Delta in units of microseconds.
* The difference between the AP TSF and the device local clock.
* @location_req: Location Request Bit[0] LCI should be sent in the FTMR
* Bit[1] Civic should be sent in the FTMR
* @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
* @enable_dyn_ack: Enable Dynamic ACK BW.
* 0 Initiator interact with regular AP
* 1 Initiator interact with Responder machine: need to send the
* Initiator Acks with HT 40MHz / 80MHz, since the Responder should
* use it for its ch est measurement (this flag will be set when we
* configure the opposite machine to be Responder).
* @rssi: Last received value
* legal values: -128 to 0; a value above 0 (e.g. 0x7f) indicates an invalid value.
*/
struct iwl_tof_range_req_ap_entry {
u8 channel_num;
u8 bandwidth;
u8 tsf_delta_direction;
u8 ctrl_ch_position;
u8 bssid[ETH_ALEN];
u8 measure_type;
u8 num_of_bursts;
__le16 burst_period;
u8 samples_per_burst;
u8 retries_per_sample;
__le32 tsf_delta;
u8 location_req;
u8 asap_mode;
u8 enable_dyn_ack;
s8 rssi;
} __packed;
/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
* @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
* timeout expiration
* @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
* earlier of: measurements completion / timeout
* expiration.
*/
enum iwl_tof_response_mode {
IWL_MVM_TOF_RESPOSE_ASAP = 1,
IWL_MVM_TOF_RESPOSE_TIMEOUT,
IWL_MVM_TOF_RESPOSE_COMPLETE,
};
/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @initiator: 0- NW initiated, 1 - Client Initiated
* @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
* '1' - run ML-Algo for ToF only
* @req_timeout: Requested timeout of the response in units of 100ms.
* This is equivalent to the session time configured to the
* LMAC in Initiator Request
* @report_policy: Supported partially for this release: For current release -
* the range report will be uploaded as a batch when ready or
* when the session is done (successfully / partially).
* one of iwl_tof_response_mode.
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
* '1' Use MAC Address randomization according to the below
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
* @ap: per-AP request data
*/
struct iwl_tof_range_req_cmd {
__le32 sub_grp_cmd_id;
u8 request_id;
u8 initiator;
u8 one_sided_los_disable;
u8 req_timeout;
u8 report_policy;
u8 los_det_disable;
u8 num_of_ap;
u8 macaddr_random;
u8 macaddr_template[ETH_ALEN];
u8 macaddr_mask[ETH_ALEN];
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
} __packed;
/**
* struct iwl_tof_gen_resp_cmd - generic ToF response
*/
struct iwl_tof_gen_resp_cmd {
__le32 sub_grp_cmd_id;
u8 data[];
} __packed;
/**
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
* @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @rtt: The Round Trip Time that took for the last measurement for
* current AP [nSec]
* @rtt_variance: The Variance of the RTT values measured for current AP
* @rtt_spread: The Difference between the maximum and the minimum RTT
* values measured for current AP in the current session [nsec]
* @rssi: RSSI as uploaded in the Channel Estimation notification
* @rssi_spread: The Difference between the maximum and the minimum RSSI values
* measured for current AP in the current session
* @reserved: reserved
* @range: Measured range [cm]
* @range_variance: Measured range variance [cm]
* @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
* uploaded by the LMAC
*/
struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
__le32 rtt;
__le32 rtt_variance;
__le32 rtt_spread;
s8 rssi;
u8 rssi_spread;
__le16 reserved;
__le32 range;
__le32 range_variance;
__le32 timestamp;
} __packed;
/**
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @request_status: status of current measurement session
* @last_in_batch: report policy (when not all responses are uploaded at once)
* @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @ap: per-AP data
*/
struct iwl_tof_range_rsp_ntfy {
u8 request_id;
u8 request_status;
u8 last_in_batch;
u8 num_of_aps;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
} __packed;
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
* struct iwl_tof_mcsi_notif - used for debug
* @token: token ID for the current session
* @role: '0' - initiator, '1' - responder
* @reserved: reserved
* @initiator_bssid: initiator machine
* @responder_bssid: responder machine
* @mcsi_buffer: debug data
*/
struct iwl_tof_mcsi_notif {
u8 token;
u8 role;
__le16 reserved;
u8 initiator_bssid[ETH_ALEN];
u8 responder_bssid[ETH_ALEN];
u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
} __packed;
/**
* struct iwl_tof_neighbor_report - neighbor report data
* @bssid: BSSID of the AP which sent the report
* @request_token: same token as the corresponding request
* @status:
* @report_ie_len: the length of the response frame starting from the Element ID
* @data: the IEs
*/
struct iwl_tof_neighbor_report {
u8 bssid[ETH_ALEN];
u8 request_token;
u8 status;
__le16 report_ie_len;
u8 data[];
} __packed;
/**
* struct iwl_tof_range_abort_cmd
* @request_id: corresponds to a range request
* @reserved: reserved
*/
struct iwl_tof_range_abort_cmd {
__le32 sub_grp_cmd_id;
u8 request_id;
u8 reserved[3];
} __packed;
#endif

View File

@ -469,6 +469,93 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a02400, .end = 0x00a02758 },
};
static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
{ .start = 0x00a00000, .end = 0x00a00000 },
{ .start = 0x00a0000c, .end = 0x00a00024 },
{ .start = 0x00a0002c, .end = 0x00a00034 },
{ .start = 0x00a0003c, .end = 0x00a0003c },
{ .start = 0x00a00410, .end = 0x00a00418 },
{ .start = 0x00a00420, .end = 0x00a00420 },
{ .start = 0x00a00428, .end = 0x00a00428 },
{ .start = 0x00a00430, .end = 0x00a0043c },
{ .start = 0x00a00444, .end = 0x00a00444 },
{ .start = 0x00a00840, .end = 0x00a00840 },
{ .start = 0x00a00850, .end = 0x00a00858 },
{ .start = 0x00a01004, .end = 0x00a01008 },
{ .start = 0x00a01010, .end = 0x00a01010 },
{ .start = 0x00a01018, .end = 0x00a01018 },
{ .start = 0x00a01024, .end = 0x00a01024 },
{ .start = 0x00a0102c, .end = 0x00a01034 },
{ .start = 0x00a0103c, .end = 0x00a01040 },
{ .start = 0x00a01048, .end = 0x00a01050 },
{ .start = 0x00a01058, .end = 0x00a01058 },
{ .start = 0x00a01060, .end = 0x00a01070 },
{ .start = 0x00a0108c, .end = 0x00a0108c },
{ .start = 0x00a01c20, .end = 0x00a01c28 },
{ .start = 0x00a01d10, .end = 0x00a01d10 },
{ .start = 0x00a01e28, .end = 0x00a01e2c },
{ .start = 0x00a01e60, .end = 0x00a01e60 },
{ .start = 0x00a01e80, .end = 0x00a01e80 },
{ .start = 0x00a01ea0, .end = 0x00a01ea0 },
{ .start = 0x00a02000, .end = 0x00a0201c },
{ .start = 0x00a02024, .end = 0x00a02024 },
{ .start = 0x00a02040, .end = 0x00a02048 },
{ .start = 0x00a020c0, .end = 0x00a020e0 },
{ .start = 0x00a02400, .end = 0x00a02404 },
{ .start = 0x00a0240c, .end = 0x00a02414 },
{ .start = 0x00a0241c, .end = 0x00a0243c },
{ .start = 0x00a02448, .end = 0x00a024bc },
{ .start = 0x00a024c4, .end = 0x00a024cc },
{ .start = 0x00a02508, .end = 0x00a02508 },
{ .start = 0x00a02510, .end = 0x00a02514 },
{ .start = 0x00a0251c, .end = 0x00a0251c },
{ .start = 0x00a0252c, .end = 0x00a0255c },
{ .start = 0x00a02564, .end = 0x00a025a0 },
{ .start = 0x00a025a8, .end = 0x00a025b4 },
{ .start = 0x00a025c0, .end = 0x00a025c0 },
{ .start = 0x00a025e8, .end = 0x00a025f4 },
{ .start = 0x00a02c08, .end = 0x00a02c18 },
{ .start = 0x00a02c2c, .end = 0x00a02c38 },
{ .start = 0x00a02c68, .end = 0x00a02c78 },
{ .start = 0x00a03000, .end = 0x00a03000 },
{ .start = 0x00a03010, .end = 0x00a03014 },
{ .start = 0x00a0301c, .end = 0x00a0302c },
{ .start = 0x00a03034, .end = 0x00a03038 },
{ .start = 0x00a03040, .end = 0x00a03044 },
{ .start = 0x00a03060, .end = 0x00a03068 },
{ .start = 0x00a03070, .end = 0x00a03070 },
{ .start = 0x00a0307c, .end = 0x00a03084 },
{ .start = 0x00a0308c, .end = 0x00a03090 },
{ .start = 0x00a03098, .end = 0x00a03098 },
{ .start = 0x00a030a0, .end = 0x00a030a0 },
{ .start = 0x00a030a8, .end = 0x00a030b4 },
{ .start = 0x00a030bc, .end = 0x00a030c0 },
{ .start = 0x00a030c8, .end = 0x00a030f4 },
{ .start = 0x00a03100, .end = 0x00a0312c },
{ .start = 0x00a03c00, .end = 0x00a03c5c },
{ .start = 0x00a04400, .end = 0x00a04454 },
{ .start = 0x00a04460, .end = 0x00a04474 },
{ .start = 0x00a044c0, .end = 0x00a044ec },
{ .start = 0x00a04500, .end = 0x00a04504 },
{ .start = 0x00a04510, .end = 0x00a04538 },
{ .start = 0x00a04540, .end = 0x00a04548 },
{ .start = 0x00a04560, .end = 0x00a04560 },
{ .start = 0x00a04570, .end = 0x00a0457c },
{ .start = 0x00a04590, .end = 0x00a04590 },
{ .start = 0x00a04598, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
{ .start = 0x00a0c000, .end = 0x00a0c018 },
{ .start = 0x00a0c020, .end = 0x00a0c028 },
{ .start = 0x00a0c038, .end = 0x00a0c094 },
{ .start = 0x00a0c0c0, .end = 0x00a0c104 },
{ .start = 0x00a0c10c, .end = 0x00a0c118 },
{ .start = 0x00a0c150, .end = 0x00a0c174 },
{ .start = 0x00a0c17c, .end = 0x00a0c188 },
{ .start = 0x00a0c190, .end = 0x00a0c198 },
{ .start = 0x00a0c1a0, .end = 0x00a0c1a8 },
{ .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
};
static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
u32 len_bytes, __le32 *data)
{
@ -478,15 +565,20 @@ static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
}
static void iwl_dump_prph(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
const struct iwl_prph_range *iwl_prph_dump_addr,
u32 range_len)
u32 range_len, void *ptr)
{
struct iwl_fw_error_dump_prph *prph;
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_error_dump_data **data =
(struct iwl_fw_error_dump_data **)ptr;
unsigned long flags;
u32 i;
if (!data)
return;
IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
if (!iwl_trans_grab_nic_access(trans, &flags))
@ -552,37 +644,47 @@ static struct scatterlist *alloc_sgtable(int size)
return table;
}
static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
const struct iwl_prph_range *iwl_prph_dump_addr,
u32 range_len, void *ptr)
{
u32 prph_len = 0;
int i;
u32 *prph_len = (u32 *)ptr;
int i, num_bytes_in_chunk;
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
i++) {
if (!prph_len)
return;
for (i = 0; i < range_len; i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk =
iwl_prph_dump_addr_comm[i].end -
iwl_prph_dump_addr_comm[i].start + 4;
num_bytes_in_chunk =
iwl_prph_dump_addr[i].end -
iwl_prph_dump_addr[i].start + 4;
prph_len += sizeof(struct iwl_fw_error_dump_data) +
*prph_len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
}
}
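
A quick worked example of the inclusive-range arithmetic, using the first entries of the 22000 table above (illustrative only):

/*
 * { .start = 0x00a00000, .end = 0x00a00000 }: 0x00a00000 - 0x00a00000 + 4
 *	= 4 bytes, i.e. a single 32-bit register.
 * { .start = 0x00a0000c, .end = 0x00a00024 }: 0x00a00024 - 0x00a0000c + 4
 *	= 0x1c = 28 bytes, i.e. seven registers, counting both endpoints.
 * Each chunk then also accounts for the iwl_fw_error_dump_data and
 * iwl_fw_error_dump_prph headers.
 */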
static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
void (*handler)(struct iwl_fw_runtime *,
const struct iwl_prph_range *,
u32, void *))
{
u32 range_len;
if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
} else {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
if (fwrt->trans->cfg->mq_rx_supported) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk =
iwl_prph_dump_addr_9000[i].end -
iwl_prph_dump_addr_9000[i].start + 4;
prph_len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
}
}
return prph_len;
}
static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
@ -646,6 +748,9 @@ static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
/* Count RXF1 sizes */
if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
mem_cfg->num_lmacs = MAX_NUM_LMAC;
for (i = 0; i < mem_cfg->num_lmacs; i++)
ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
@ -664,6 +769,9 @@ static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
goto dump_internal_txf;
/* Count TXF sizes */
if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
mem_cfg->num_lmacs = MAX_NUM_LMAC;
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;
@ -733,6 +841,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
return NULL;
img = &fwrt->fw->img[fwrt->cur_fw_img];
sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
@ -747,9 +857,9 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
/* Make room for PRPH registers */
if (!fwrt->trans->cfg->gen2 &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
prph_len += iwl_fw_get_prph_len(fwrt);
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
iwl_fw_prph_handler(fwrt, &prph_len,
iwl_fw_get_prph_len);
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
@ -828,7 +938,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
sizeof(dump_info->dev_human_readable) - 1);
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable) - 1);
dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status);
dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
dump_info->lmac_err_id[0] =
cpu_to_le32(fwrt->dump.lmac_err_id[0]);
if (fwrt->smem_cfg.num_lmacs > 1)
dump_info->lmac_err_id[1] =
cpu_to_le32(fwrt->dump.lmac_err_id[1]);
dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
dump_data = iwl_fw_error_next_data(dump_data);
}
@ -935,16 +1051,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (iwl_fw_dbg_is_paging_enabled(fwrt))
iwl_dump_paging(fwrt, &dump_data);
if (prph_len) {
iwl_dump_prph(fwrt->trans, &dump_data,
iwl_prph_dump_addr_comm,
ARRAY_SIZE(iwl_prph_dump_addr_comm));
if (fwrt->trans->cfg->mq_rx_supported)
iwl_dump_prph(fwrt->trans, &dump_data,
iwl_prph_dump_addr_9000,
ARRAY_SIZE(iwl_prph_dump_addr_9000));
}
if (prph_len)
iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph);
out:
dump_file->file_len = cpu_to_le32(file_len);
@ -1108,13 +1216,13 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
iwl_dump_prph_ini(fwrt->trans, data, reg);
break;
case IWL_FW_INI_REGION_DRAM_BUFFER:
*dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR;
*dump_mask |= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
break;
case IWL_FW_INI_REGION_PAGING:
if (iwl_fw_dbg_is_paging_enabled(fwrt))
iwl_dump_paging(fwrt, data);
else
*dump_mask |= IWL_FW_ERROR_DUMP_PAGING;
*dump_mask |= BIT(IWL_FW_ERROR_DUMP_PAGING);
break;
case IWL_FW_INI_REGION_TXF:
iwl_fw_dump_txf(fwrt, data);
@ -1146,10 +1254,6 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
if (id == FW_DBG_TRIGGER_FW_ASSERT)
id = IWL_FW_TRIGGER_ID_FW_ASSERT;
else if (id == FW_DBG_TRIGGER_USER)
id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
else if (id < FW_DBG_TRIGGER_MAX)
return NULL;
if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
return NULL;
@ -1385,7 +1489,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
if (WARN_ON(!fwrt->dump.active_trigs[id].active))
return -EINVAL;
delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec);
delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->dump_delay);
occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences);
if (!occur)
return 0;
@ -1570,27 +1674,29 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_allocation_tlv *alloc)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_continuous_record_cmd cont_rec = {};
struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0];
struct iwl_ldbg_config_cmd ldbg_cmd = {
.type = cpu_to_le32(BUFFER_ALLOCATION),
};
struct iwl_buffer_allocation_cmd *cmd = &ldbg_cmd.buffer_allocation;
struct iwl_host_cmd hcmd = {
.id = LDBG_CONFIG_CMD,
.flags = CMD_ASYNC,
.data[0] = &cont_rec,
.len[0] = sizeof(cont_rec),
.data[0] = &ldbg_cmd,
.len[0] = sizeof(ldbg_cmd),
};
void *virtual_addr = NULL;
u32 size = le32_to_cpu(alloc->size);
dma_addr_t phys_addr;
cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
if (!trans->num_blocks &&
le32_to_cpu(alloc->buffer_location) !=
IWL_FW_INI_LOCATION_DRAM_PATH)
return;
virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size,
&phys_addr, GFP_KERNEL);
virtual_addr =
dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO |
__GFP_COMP);
/* TODO: alloc fragments if needed */
if (!virtual_addr)

View File

@ -102,7 +102,10 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
fwrt->dump.rt_status = 0;
fwrt->dump.lmac_err_id[0] = 0;
if (fwrt->smem_cfg.num_lmacs > 1)
fwrt->dump.lmac_err_id[1] = 0;
fwrt->dump.umac_err_id = 0;
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
@ -266,20 +269,20 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
static inline int
iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
{
struct iwl_continuous_record_cmd cont_rec = {};
struct iwl_ldbg_config_cmd cmd = {
.type = start ? cpu_to_le32(START_DEBUG_RECORDING) :
cpu_to_le32(STOP_DEBUG_RECORDING),
};
struct iwl_host_cmd hcmd = {
.id = LDBG_CONFIG_CMD,
.flags = CMD_ASYNC,
.data[0] = &cont_rec,
.len[0] = sizeof(cont_rec),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
cont_rec.record_mode.enable_recording = start ?
cpu_to_le16(START_DEBUG_RECORDING) :
cpu_to_le16(STOP_DEBUG_RECORDING);
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
@ -378,6 +381,7 @@ static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
{
return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block;
}

View File

@ -180,6 +180,8 @@ enum iwl_fw_error_dump_family {
IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
};
#define MAX_NUM_LMAC 2
/**
* struct iwl_fw_error_dump_info - info on the device / firmware
* @device_family: the family of the device (7 / 8)
@ -187,7 +189,10 @@ enum iwl_fw_error_dump_family {
* @fw_human_readable: human readable FW version
* @dev_human_readable: name of the device
* @bus_human_readable: name of the bus used
* @rt_status: the error_id/rt_status that triggered the latest dump
* @num_of_lmacs: the number of lmacs
* @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump
* if the dump collection was not initiated by an assert, the value is 0
* @umac_err_id: the umac error_id/rt_status that triggered the latest dump
* if the dump collection was not initiated by an assert, the value is 0
*/
struct iwl_fw_error_dump_info {
@ -196,7 +201,9 @@ struct iwl_fw_error_dump_info {
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
u8 dev_human_readable[64];
u8 bus_human_readable[8];
__le32 rt_status;
u8 num_of_lmacs;
__le32 umac_err_id;
__le32 lmac_err_id[MAX_NUM_LMAC];
} __packed;
/**

View File

@ -303,7 +303,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
* @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
@ -334,6 +333,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
* @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2
* @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
* (6 GHz).
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@ -357,10 +358,13 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
* command size (command version 4) that supports toggling ACK TX
* power reduction.
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
* @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
* @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax
* capability.
* @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured
* to report the CSI information with (certain) RX frames
*
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@ -369,7 +373,6 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
@ -394,6 +397,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45,
IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@ -412,6 +416,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90,
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA

View File

@ -142,7 +142,8 @@ struct iwl_fw_runtime {
u32 *d3_debug_data;
struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
u32 rt_status;
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {

View File

@ -335,10 +335,6 @@ struct iwl_csr_params {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
* (if supported)
* @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
* next step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section (only DVM)
@ -392,8 +388,6 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
const char *fw_name_pre_b_or_c_step;
const char *fw_name_pre_rf_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
@ -551,29 +545,40 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_160_cfg;
extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_160_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl22560_2ax_cfg_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl22260_2ax_cfg;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_jf;

View File

@ -325,6 +325,7 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
/* RF_ID value */

View File

@ -210,18 +210,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const struct iwl_cfg *cfg = drv->trans->cfg;
char tag[8];
const char *fw_pre_name;
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
(CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
fw_pre_name = cfg->fw_name_pre_b_or_c_step;
else if (drv->trans->cfg->integrated &&
CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
cfg->fw_name_pre_rf_next_step)
fw_pre_name = cfg->fw_name_pre_rf_next_step;
else
fw_pre_name = cfg->fw_name_pre;
(CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
IWL_ERR(drv,
"Only HW steps B and C are currently supported (0x%0x)\n",
drv->trans->hw_rev);
return -EINVAL;
}
if (first) {
drv->fw_index = cfg->ucode_api_max;
@ -235,15 +232,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
IWL_ERR(drv, "no suitable firmware found!\n");
if (cfg->ucode_api_min == cfg->ucode_api_max) {
IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre,
cfg->ucode_api_max);
} else {
IWL_ERR(drv, "minimum version required: %s%d\n",
fw_pre_name,
cfg->ucode_api_min);
cfg->fw_name_pre, cfg->ucode_api_min);
IWL_ERR(drv, "maximum version supported: %s%d\n",
fw_pre_name,
cfg->ucode_api_max);
cfg->fw_name_pre, cfg->ucode_api_max);
}
IWL_ERR(drv,
@ -252,7 +247,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
}
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
fw_pre_name, tag);
cfg->fw_name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
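
The firmware file name built here is simply the per-config prefix followed by the ucode API tag. A hedged illustration of the resulting name (the prefix and API number below are examples, not necessarily values shipped by any config):

	char name[64];
	const char *fw_name_pre = "iwlwifi-9260-th-b0-jf-b0-";	/* illustrative prefix */
	int api = 46;						/* illustrative API level */
	char tag[8];

	snprintf(tag, sizeof(tag), "%d", api);
	snprintf(name, sizeof(name), "%s%s.ucode", fw_name_pre, tag);
	/* name == "iwlwifi-9260-th-b0-jf-b0-46.ucode" */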

View File

@ -1,10 +1,13 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@ -15,12 +18,44 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
* file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/delay.h>
#include <linux/device.h>

View File

@ -1,8 +1,9 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* Portions of this file are derived from the ipw3945 project.
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@ -14,14 +15,43 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
* file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_io_h__
#define __iwl_io_h__

View File

@ -152,4 +152,22 @@ struct iwl_mod_params {
bool enable_ini;
};
static inline bool iwl_enable_rx_ampdu(void)
{
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
return false;
return true;
}
static inline bool iwl_enable_tx_ampdu(void)
{
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
return false;
if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
return true;
/* enabled by default */
return true;
}
#endif /* #__iwl_modparams_h__ */

View File

@ -569,8 +569,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE |
IEEE80211_HE_MAC_CAP0_TWT_RES,
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
@ -1196,14 +1195,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd_to_copy = sizeof(struct ieee80211_regdomain) +
valid_rules * sizeof(struct ieee80211_reg_rule);
copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
if (!copy_rd) {
copy_rd = ERR_PTR(-ENOMEM);
goto out;
}
memcpy(copy_rd, regd, regd_to_copy);
out:
kfree(regdb_ptrs);
kfree(regd);
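
The hunk above collapses a kzalloc() + memcpy() pair into a single kmemdup() call. A minimal stand-alone sketch of the same simplification (the helper names are illustrative, not part of the driver):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* before: zero-allocate, then copy the source over the fresh buffer */
	static void *dup_blob_old(const void *src, size_t len)
	{
		void *dst = kzalloc(len, GFP_KERNEL);

		if (!dst)
			return NULL;
		memcpy(dst, src, len);
		return dst;
	}

	/* after: kmemdup() allocates and copies in one step */
	static void *dup_blob_new(const void *src, size_t len)
	{
		return kmemdup(src, len, GFP_KERNEL);
	}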

View File

@ -358,12 +358,12 @@
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c3c)
#define MON_BUFF_BASE_ADDR (0xa03c1c)
#define MON_BUFF_END_ADDR (0xa03c40)
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
/* FW monitor family 8000 and on */
#define MON_BUFF_BASE_ADDR_VER2 (0xa03c3c)
#define MON_BUFF_BASE_ADDR_VER2 (0xa03c1c)
#define MON_BUFF_END_ADDR_VER2 (0xa03c20)
#define MON_BUFF_WRPTR_VER2 (0xa03c24)
#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28)

View File

@ -7,7 +7,6 @@ iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM) += d3.o
ccflags-y += -I$(src)/../
ccflags-y += -I $(srctree)/$(src)/../

View File

@ -2125,7 +2125,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
ieee80211_stop_queues(mvm->hw);
synchronize_net();
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
@ -2140,10 +2139,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
rtnl_unlock();
if (err > 0)
err = -EINVAL;
if (err) {
ieee80211_wake_queues(mvm->hw);
if (err)
return err;
}
mvm->d3_test_active = true;
mvm->keep_vif = NULL;
return 0;
@ -2223,8 +2221,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
ieee80211_wake_queues(mvm->hw);
return 0;
}

View File

@ -60,7 +60,6 @@
*
*****************************************************************************/
#include "mvm.h"
#include "fw/api/tof.h"
#include "debugfs.h"
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@ -523,751 +522,6 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int ret = -EINVAL;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("tof_disabled=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.tof_disabled = value;
goto out;
}
data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.one_sided_disabled = value;
goto out;
}
data = iwl_dbgfs_is_match("is_debug_mode=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.is_debug_mode = value;
goto out;
}
data = iwl_dbgfs_is_match("is_buf=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.is_buf_required = value;
goto out;
}
data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
ret = iwl_mvm_tof_config_cmd(mvm);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_config_cmd *cmd;
cmd = &mvm->tof_data.tof_cfg;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
cmd->tof_disabled);
pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
cmd->one_sided_disabled);
pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
cmd->is_debug_mode);
pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
cmd->is_buf_required);
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int ret = 0;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("burst_period=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (!ret)
mvm->tof_data.responder_cfg.burst_period =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.min_delta_ftm = value;
goto out;
}
data = iwl_dbgfs_is_match("burst_duration=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.burst_duration = value;
goto out;
}
data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.num_of_burst_exp = value;
goto out;
}
data = iwl_dbgfs_is_match("abort_responder=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.abort_responder = value;
goto out;
}
data = iwl_dbgfs_is_match("get_ch_est=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.get_ch_est = value;
goto out;
}
data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.recv_sta_req_params = value;
goto out;
}
data = iwl_dbgfs_is_match("channel_num=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.channel_num = value;
goto out;
}
data = iwl_dbgfs_is_match("bandwidth=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.bandwidth = value;
goto out;
}
data = iwl_dbgfs_is_match("rate=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.rate = value;
goto out;
}
data = iwl_dbgfs_is_match("bssid=", buf);
if (data) {
u8 *mac = mvm->tof_data.responder_cfg.bssid;
if (!mac_pton(data, mac)) {
ret = -EINVAL;
goto out;
}
}
data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("toa_offset=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.toa_offset =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("center_freq=", buf);
if (data) {
struct iwl_tof_responder_config_cmd *cmd =
&mvm->tof_data.responder_cfg;
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
enum nl80211_band band = (cmd->channel_num <= 14) ?
NL80211_BAND_2GHZ :
NL80211_BAND_5GHZ;
struct ieee80211_channel chn = {
.band = band,
.center_freq = ieee80211_channel_to_frequency(
cmd->channel_num, band),
};
struct cfg80211_chan_def chandef = {
.chan = &chn,
.center_freq1 =
ieee80211_channel_to_frequency(value,
band),
};
cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
}
goto out;
}
data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.ftm_per_burst = value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
goto out;
}
data = iwl_dbgfs_is_match("asap_mode=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.asap_mode = value;
goto out;
}
data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
ret = iwl_mvm_tof_responder_cmd(mvm, vif);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_responder_config_cmd *cmd;
cmd = &mvm->tof_data.responder_cfg;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
le16_to_cpu(cmd->burst_period));
pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
cmd->burst_duration);
pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
cmd->bandwidth);
pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
cmd->channel_num);
pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
cmd->ctrl_ch_position);
pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
cmd->bssid);
pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
cmd->min_delta_ftm);
pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
cmd->num_of_burst_exp);
pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
cmd->abort_responder);
pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
cmd->get_ch_est);
pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
cmd->recv_sta_req_params);
pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
cmd->ftm_per_burst);
pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
cmd->ftm_resp_ts_avail);
pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
cmd->asap_mode);
pos += scnprintf(buf + pos, bufsz - pos,
"tsf_timer_offset_msecs = %d\n",
le16_to_cpu(cmd->tsf_timer_offset_msecs));
pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
le16_to_cpu(cmd->toa_offset));
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int ret = 0;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("request_id=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.request_id = value;
goto out;
}
data = iwl_dbgfs_is_match("initiator=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.initiator = value;
goto out;
}
data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.one_sided_los_disable = value;
goto out;
}
data = iwl_dbgfs_is_match("req_timeout=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.req_timeout = value;
goto out;
}
data = iwl_dbgfs_is_match("report_policy=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.report_policy = value;
goto out;
}
data = iwl_dbgfs_is_match("macaddr_random=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.macaddr_random = value;
goto out;
}
data = iwl_dbgfs_is_match("num_of_ap=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.num_of_ap = value;
goto out;
}
data = iwl_dbgfs_is_match("macaddr_template=", buf);
if (data) {
u8 mac[ETH_ALEN];
if (!mac_pton(data, mac)) {
ret = -EINVAL;
goto out;
}
memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
goto out;
}
data = iwl_dbgfs_is_match("macaddr_mask=", buf);
if (data) {
u8 mac[ETH_ALEN];
if (!mac_pton(data, mac)) {
ret = -EINVAL;
goto out;
}
memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
goto out;
}
data = iwl_dbgfs_is_match("ap=", buf);
if (data) {
struct iwl_tof_range_req_ap_entry ap = {};
int size = sizeof(struct iwl_tof_range_req_ap_entry);
u16 burst_period;
u8 *mac = ap.bssid;
unsigned int i;
if (sscanf(data, "%u %hhd %hhd %hhd"
"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
"%hhd %hhd %hd"
"%hhd %hhd %d"
"%hhx %hhd %hhd %hhd",
&i, &ap.channel_num, &ap.bandwidth,
&ap.ctrl_ch_position,
mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
&ap.measure_type, &ap.num_of_bursts,
&burst_period,
&ap.samples_per_burst, &ap.retries_per_sample,
&ap.tsf_delta, &ap.location_req, &ap.asap_mode,
&ap.enable_dyn_ack, &ap.rssi) != 20) {
ret = -EINVAL;
goto out;
}
if (i >= IWL_MVM_TOF_MAX_APS) {
IWL_ERR(mvm, "Invalid AP index %d\n", i);
ret = -EINVAL;
goto out;
}
ap.burst_period = cpu_to_le16(burst_period);
memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
goto out;
}
data = iwl_dbgfs_is_match("send_range_request=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value)
ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
goto out;
}
ret = -EINVAL;
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[512];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_range_req_cmd *cmd;
int i;
cmd = &mvm->tof_data.range_req;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
cmd->request_id);
pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
cmd->initiator);
pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
cmd->one_sided_los_disable);
pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
cmd->req_timeout);
pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
cmd->report_policy);
pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
cmd->macaddr_random);
pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
cmd->macaddr_template);
pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
cmd->macaddr_mask);
pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
cmd->num_of_ap);
for (i = 0; i < cmd->num_of_ap; i++) {
struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
pos += scnprintf(buf + pos, bufsz - pos,
"ap %.2d: channel_num=%hhd bw=%hhd"
" control=%hhd bssid=%pM type=%hhd"
" num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
" retries=%hhd tsf_delta=%d"
" tsf_delta_direction=%hhd location_req=0x%hhx "
" asap=%hhd enable=%hhd rssi=%hhd\n",
i, ap->channel_num, ap->bandwidth,
ap->ctrl_ch_position, ap->bssid,
ap->measure_type, ap->num_of_bursts,
ap->burst_period, ap->samples_per_burst,
ap->retries_per_sample, ap->tsf_delta,
ap->tsf_delta_direction,
ap->location_req, ap->asap_mode,
ap->enable_dyn_ack, ap->rssi);
}
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int ret = 0;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.min_delta_ftm = value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
value;
goto out;
}
data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value)
ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
goto out;
}
ret = -EINVAL;
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_range_req_ext_cmd *cmd;
cmd = &mvm->tof_data.range_req_ext;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos,
"tsf_timer_offset_msec = %hd\n",
cmd->tsf_timer_offset_msec);
pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
cmd->min_delta_ftm);
pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw20M = %hhd\n",
cmd->ftm_format_and_bw20M);
pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw40M = %hhd\n",
cmd->ftm_format_and_bw40M);
pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw80M = %hhd\n",
cmd->ftm_format_and_bw80M);
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int abort_id, ret = 0;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("abort_id=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.last_abort_id = value;
goto out;
}
data = iwl_dbgfs_is_match("send_range_abort=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
abort_id = mvm->tof_data.last_abort_id;
ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[32];
int pos = 0;
const size_t bufsz = sizeof(buf);
int last_abort_id;
mutex_lock(&mvm->mutex);
last_abort_id = mvm->tof_data.last_abort_id;
mutex_unlock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
last_abort_id);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char *buf;
int pos = 0;
const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
struct iwl_tof_range_rsp_ntfy *cmd;
int i, ret;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&mvm->mutex);
cmd = &mvm->tof_data.range_resp;
pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
cmd->request_id);
pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
cmd->request_status);
pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
cmd->last_in_batch);
pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
cmd->num_of_aps);
for (i = 0; i < cmd->num_of_aps; i++) {
struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
pos += scnprintf(buf + pos, bufsz - pos,
"ap %.2d: bssid=%pM status=%hhd bw=%hhd"
" rtt=%d rtt_var=%d rtt_spread=%d"
" rssi=%hhd rssi_spread=%hhd"
" range=%d range_var=%d"
" time_stamp=%d\n",
i, ap->bssid, ap->measure_status,
ap->measure_bw,
ap->rtt, ap->rtt_variance, ap->rtt_spread,
ap->rssi, ap->rssi_spread, ap->range,
ap->range_variance, ap->timestamp);
}
mutex_unlock(&mvm->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
size_t count, loff_t *ppos)
{
@ -1289,21 +543,64 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
return count;
}
static ssize_t
iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u8 value;
int ret;
ret = kstrtou8(buf, 0, &value);
if (ret)
return ret;
if (value > NUM_LOW_LATENCY_FORCE)
return -EINVAL;
mutex_lock(&mvm->mutex);
if (value == LOW_LATENCY_FORCE_UNSET) {
iwl_mvm_update_low_latency(mvm, vif, false,
LOW_LATENCY_DEBUGFS_FORCE);
iwl_mvm_update_low_latency(mvm, vif, false,
LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
} else {
iwl_mvm_update_low_latency(mvm, vif,
value == LOW_LATENCY_FORCE_ON,
LOW_LATENCY_DEBUGFS_FORCE);
iwl_mvm_update_low_latency(mvm, vif, true,
LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
}
mutex_unlock(&mvm->mutex);
return count;
}
static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[30] = {};
char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n"
"dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n";
/*
* all values in format are boolean so the size of format is enough
* for holding the result string
*/
char buf[sizeof(format) + 1] = {};
int len;
len = scnprintf(buf, sizeof(buf) - 1,
"traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
len = scnprintf(buf, sizeof(buf) - 1, format,
!!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
!!(mvmvif->low_latency & LOW_LATENCY_VCMD),
!!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
!!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE),
!!(mvmvif->low_latency &
LOW_LATENCY_DEBUGFS_FORCE_ENABLE),
!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE),
!!(mvmvif->low_latency_actual));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
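
The rewritten read handler above sizes its stack buffer from the format string itself: every conversion is a boolean, so each two-character "%d" expands to a single '0' or '1' and sizeof(format) bounds the formatted output. A reduced sketch of the same idea with only two flags (names and signature are illustrative):

	#include <linux/fs.h>
	#include <linux/kernel.h>

	static ssize_t report_flags_read(char __user *user_buf, size_t count,
					 loff_t *ppos, bool traffic, bool dbgfs)
	{
		static const char format[] = "traffic=%d\ndbgfs=%d\n";
		/* every %d prints one character, so the format bounds the output */
		char buf[sizeof(format) + 1] = {};
		int len;

		len = scnprintf(buf, sizeof(buf) - 1, format, traffic, dbgfs);
		return simple_read_from_buffer(user_buf, count, ppos, buf, len);
	}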
@ -1456,14 +753,9 @@ MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
@ -1497,6 +789,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
@ -1506,24 +799,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif == mvm->bf_allowed_vif)
MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
!vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
0600);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
0600);
MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
0600);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
0600);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
0400);
}
/*
* Create symlink for convenience pointing to interface specific
* debugfs entries for the driver. For example, under

View File

@ -69,6 +69,7 @@
#include "sta.h"
#include "iwl-io.h"
#include "debugfs.h"
#include "iwl-modparams.h"
#include "fw/error-dump.h"
static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
@ -1206,47 +1207,6 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
/*
* Enable / Disable continuous recording.
* Cause the FW to start continuous recording, by sending the relevant hcmd.
* Enable: input of every integer larger than 0, ENABLE_CONT_RECORDING.
* Disable: for 0 as input, DISABLE_CONT_RECORDING.
*/
static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_trans *trans = mvm->trans;
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
struct iwl_continuous_record_cmd cont_rec = {};
int ret, rec_mode;
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
if (!dest)
return -EOPNOTSUPP;
if (dest->monitor_mode != SMEM_MODE ||
trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
return -EOPNOTSUPP;
ret = kstrtoint(buf, 0, &rec_mode);
if (ret)
return ret;
cont_rec.record_mode.enable_recording = rec_mode ?
cpu_to_le16(ENABLE_CONT_RECORDING) :
cpu_to_le16(DISABLE_CONT_RECORDING);
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0,
sizeof(cont_rec), &cont_rec);
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
@ -1722,11 +1682,33 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
struct iwl_mvm_sniffer_apply {
struct iwl_mvm *mvm;
u16 aid;
};
static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_mvm_sniffer_apply *apply = data;
apply->mvm->cur_aid = cpu_to_le16(apply->aid);
return true;
}
static ssize_t
iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
struct iwl_notification_wait wait;
struct iwl_he_monitor_cmd he_mon_cmd = {};
struct iwl_mvm_sniffer_apply apply = {
.mvm = mvm,
};
u16 wait_cmds[] = {
iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
};
u32 aid;
int ret;
@ -1742,10 +1724,30 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
he_mon_cmd.aid = cpu_to_le16(aid);
apply.aid = aid;
mutex_lock(&mvm->mutex);
/*
* Use the notification waiter to get our function triggered
* in sequence with other RX. This ensures that frames we get
* on the RX queue _before_ the new configuration is applied
* still have mvm->cur_aid pointing to the old AID, and that
* frames on the RX queue _after_ the firmware processed the
* new configuration (and sent the response, synchronously)
* get mvm->cur_aid correctly set to the new AID.
*/
iwl_init_notification_wait(&mvm->notif_wait, &wait,
wait_cmds, ARRAY_SIZE(wait_cmds),
iwl_mvm_sniffer_apply, &apply);
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
DATA_PATH_GROUP, 0), 0,
sizeof(he_mon_cmd), &he_mon_cmd);
/* no need to really wait, we already did anyway */
iwl_remove_notification(&mvm->notif_wait, &wait);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@ -1800,7 +1802,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
@ -2004,7 +2005,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
#ifdef CONFIG_ACPI
@ -2071,6 +2071,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_blob("nvm_phy_sku", 0400,
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
goto err;
if (!debugfs_create_blob("nvm_reg", S_IRUSR,
mvm->debugfs_dir, &mvm->nvm_reg_blob))
goto err;
debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);

View File

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -89,7 +91,7 @@
#include "fw/api/sf.h"
#include "fw/api/sta.h"
#include "fw/api/stats.h"
#include "fw/api/tof.h"
#include "fw/api/location.h"
#include "fw/api/tx.h"
#endif /* __fw_api_h__ */

View File

@ -293,9 +293,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
struct iwl_mvm_alive_data alive_data;
struct iwl_mvm_alive_data alive_data = {};
const struct fw_img *fw;
int ret, i;
int ret;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
@ -373,9 +373,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
BIT(IWL_MAX_TID_COUNT + 2);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_fw_set_dbg_rec_on(&mvm->fwrt);

View File

@ -97,11 +97,6 @@ struct iwl_mvm_mac_iface_iterator_data {
bool found_vif;
};
struct iwl_mvm_hw_queues_iface_iterator_data {
struct ieee80211_vif *exclude_vif;
unsigned long used_hw_queues;
};
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@ -208,61 +203,6 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
data->preferred_tsf = NUM_TSF_IDS;
}
/*
* Get the mask of the queues used by the vif
*/
u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
{
u32 qmask = 0, ac;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
qmask |= BIT(vif->hw_queue[ac]);
}
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
qmask |= BIT(vif->cab_queue);
return qmask;
}
static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
/* exclude the given vif */
if (vif == data->exclude_vif)
return;
data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
}
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif)
{
struct iwl_mvm_hw_queues_iface_iterator_data data = {
.exclude_vif = exclude_vif,
.used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
BIT(mvm->aux_queue) |
BIT(IWL_MVM_DQA_GCAST_QUEUE),
};
lockdep_assert_held(&mvm->mutex);
/* mark all VIF used hw queues */
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_iface_hw_queues_iter, &data);
return data.used_hw_queues;
}
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@ -360,8 +300,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_mac_iface_iterator, &data);
used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
/*
* In the case we're getting here during resume, it's similar to
* firmware restart, and with RESUME_ALL the iterator will find
@ -416,9 +354,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* the ones here - no real limit
*/
queue_limit = IEEE80211_MAX_QUEUES;
BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
BITS_PER_BYTE *
sizeof(mvm->hw_queue_to_mac80211[0]));
/*
* Find available queues, and allocate them to the ACs. When in
@ -446,9 +381,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* queue value (when queue is enabled).
*/
mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
} else {
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
@ -462,8 +394,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
exit_fail:
memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
return ret;
}
@ -778,27 +708,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u8 sta_id = mvmvif->ap_sta_id;
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
if (sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *sta;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/*
* TODO: we should check the ext cap IE but it is
* unclear why the spec requires two bits (one in HE
* cap IE, and one in the ext cap IE). In the meantime
* rely on the HE cap IE only.
*/
if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_TWT_RES))
ctxt_sta->data_policy |=
cpu_to_le32(TWT_SUPPORTED);
}
if (vif->bss_conf.twt_requester)
ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
}
@ -881,8 +793,6 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
/* Override the filter flags to accept only probe requests */
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
@ -1203,7 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
/*
* Only set the beacon time when the MAC is being added, when we
@ -1435,7 +1345,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
"beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n",
status, beacon_notify_hdr->failure_frame,
le64_to_cpu(beacon->tsf),
mvm->ap_last_beacon_gp2,
@ -1472,35 +1382,48 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
}
}
static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_missed_beacons_notif *missed_beacons = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
struct iwl_fw_dbg_trigger_tlv *trigger;
u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
struct ieee80211_vif *vif;
u32 id = le32_to_cpu(mb->mac_id);
if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
return;
IWL_DEBUG_INFO(mvm,
"missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
le32_to_cpu(mb->mac_id),
le32_to_cpu(mb->consec_missed_beacons),
le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
le32_to_cpu(mb->num_recvd_beacons),
le32_to_cpu(mb->num_expected_beacons));
rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
rcu_read_lock();
vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
if (!vif)
goto out;
rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
rx_missed_bcon_since_rx =
le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
/*
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
IWL_MVM_MISSED_BEACONS_THRESHOLD)
if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
iwl_mvm_connection_loss(mvm, vif, "missed beacons");
else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
if (!trigger)
return;
goto out;
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
@ -1512,28 +1435,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
}
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
IWL_DEBUG_INFO(mvm,
"missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
le32_to_cpu(mb->mac_id),
le32_to_cpu(mb->consec_missed_beacons),
le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
le32_to_cpu(mb->num_recvd_beacons),
le32_to_cpu(mb->num_expected_beacons));
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_beacon_loss_iterator,
mb);
iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS);
out:
rcu_read_unlock();
}
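
Both reworked notification handlers (missed beacons above, probe response data below) drop the ieee80211_iterate_active_interfaces*() walk and instead resolve the firmware MAC id directly through iwl_mvm_rcu_dereference_vif_id(). That helper is not shown in this hunk; a plausible shape for it, based on the vif_id_to_mac[] table this series populates in add/remove_interface, would be (an assumption for illustration, not the committed code):

	static inline struct ieee80211_vif *
	iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
	{
		if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac)))
			return NULL;

		if (rcu)
			return rcu_dereference(mvm->vif_id_to_mac[vif_id]);

		return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id],
						 lockdep_is_held(&mvm->mutex));
	}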
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
@ -1575,16 +1481,29 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}
static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_probe_resp_data_notif *notif = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
struct iwl_probe_resp_data *old_data, *new_data;
int len = iwl_rx_packet_payload_len(pkt);
u32 id = le32_to_cpu(notif->mac_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id))
if (WARN_ON_ONCE(len < sizeof(*notif)))
return;
IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
notif->noa_active, notif->csa_counter);
vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
if (!vif)
return;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
if (!new_data)
return;
@ -1615,25 +1534,6 @@ static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
ieee80211_csa_set_counter(vif, notif->csa_counter);
}
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
int len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON_ONCE(len < sizeof(*notif)))
return;
IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
notif->noa_active, notif->csa_counter);
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_ACTIVE,
iwl_mvm_probe_resp_data_iter,
notif);
}
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{

View File

@ -395,6 +395,21 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
return ret;
}
const static u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
{
.iftype = NL80211_IFTYPE_STATION,
.extended_capabilities = he_if_types_ext_capa_sta,
.extended_capabilities_mask = he_if_types_ext_capa_sta,
.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
},
};
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@ -410,7 +425,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SPECTRUM_MGMT);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, QUEUE_CONTROL);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
@ -424,6 +438,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
ieee80211_hw_set(hw, STA_MMPDU_TXQ);
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
if (iwl_mvm_has_tlc_offload(mvm)) {
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@ -469,6 +487,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
hw->max_tx_fragments = mvm->trans->max_skb_frags;
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
@ -534,6 +553,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
hw->txq_data_size = sizeof(struct iwl_mvm_txq);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@ -673,6 +693,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
}
if (mvm->nvm_data->sku_cap_11ax_enable &&
!iwlwifi_mod_params.disable_11ax) {
hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
hw->wiphy->num_iftype_ext_capab =
ARRAY_SIZE(he_iftypes_ext_capa);
}
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@ -776,7 +803,6 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
goto out;
__skb_queue_tail(&mvm->d0i3_tx, skb);
ieee80211_stop_queues(mvm->hw);
/* trigger wakeup */
iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
@ -796,13 +822,15 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_sta *sta = control->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
bool offchannel = IEEE80211_SKB_CB(skb)->flags &
IEEE80211_TX_CTL_TX_OFFCHAN;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
goto drop;
}
if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
if (offchannel &&
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@ -815,8 +843,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
sta = NULL;
/* If there is no sta, and it's not offchannel - send through AP */
if (info->control.vif->type == NL80211_IFTYPE_STATION &&
info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
!offchannel) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif);
u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
@ -844,22 +872,95 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
ieee80211_free_txskb(hw, skb);
}
static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
return false;
return true;
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
struct sk_buff *skb = NULL;
/*
* No need for threads to be pending here; they can leave the first
* taker all the work.
*
* mvmtxq->tx_request logic:
*
* If 0, no one is currently TXing, set to 1 to indicate current thread
* will now start TX and other threads should quit.
*
* If 1, another thread is currently TXing, set to 2 to indicate to
* that thread that there was another request. Since that request may
* have raced with the check whether the queue is empty, the TXing
* thread should check the queue's status one more time before leaving.
* This check is done in order to not leave any TX hanging in the queue
* until the next TX invocation (which may not even happen).
*
* If 2, another thread is currently TXing, and it will already double
* check the queue, so do nothing.
*/
if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
return;
rcu_read_lock();
do {
while (likely(!mvmtxq->stopped &&
(mvm->trans->system_pm_mode ==
IWL_PLAT_PM_MODE_DISABLED))) {
skb = ieee80211_tx_dequeue(hw, txq);
if (!skb)
break;
if (!txq->sta)
iwl_mvm_tx_skb_non_sta(mvm, skb);
else
iwl_mvm_tx_skb(mvm, skb, txq->sta);
}
} while (atomic_dec_return(&mvmtxq->tx_request));
rcu_read_unlock();
}
static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
return false;
if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
return true;
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
/* enabled by default */
return true;
/*
* Please note that racing is handled very carefully here:
* mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
* deleted afterwards.
* This means that if:
* mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
* queue is allocated and we can TX.
* mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
* a race, should defer the frame.
* mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
* need to allocate the queue and defer the frame.
* mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
* queue is already scheduled for allocation, no need to allocate,
* should defer the frame.
*/
/* If the queue is allocated, TX and return. */
if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
/*
* Check that list is empty to avoid a race where txq_id is
* already updated, but the queue allocation work wasn't
* finished
*/
if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
return;
iwl_mvm_mac_itxq_xmit(hw, txq);
return;
}
/* The list is being deleted only after the queue is fully allocated. */
if (!list_empty(&mvmtxq->list))
return;
list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
schedule_work(&mvm->add_stream_wk);
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
@ -974,7 +1075,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
}
if (!iwl_enable_rx_ampdu(mvm->cfg)) {
if (!iwl_enable_rx_ampdu()) {
ret = -EINVAL;
break;
}
@ -986,7 +1087,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
timeout);
break;
case IEEE80211_AMPDU_TX_START:
if (!iwl_enable_tx_ampdu(mvm->cfg)) {
if (!iwl_enable_tx_ampdu()) {
ret = -EINVAL;
break;
}
@ -1066,6 +1167,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_stop_device(mvm);
mvm->cur_aid = 0;
mvm->scan_status = 0;
mvm->ps_disabled = false;
mvm->calibrating = false;
@ -1085,7 +1188,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@ -1391,6 +1493,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
/* Counting number of interfaces is needed for legacy PM */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
@ -1582,6 +1686,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
if (vif->type == NL80211_IFTYPE_MONITOR)
mvm->monitor_on = false;
@ -2146,6 +2252,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
}
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
bss_conf->assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
/*
* If we're not associated yet, take the (new) BSSID before associating
* so the firmware knows. If we're already associated, then use the old
@ -2672,7 +2784,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
return;
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
@ -2861,32 +2973,6 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
peer_addr, action);
}
static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta)
{
struct iwl_mvm_tid_data *tid_data;
struct sk_buff *skb;
int i;
spin_lock_bh(&mvm_sta->lock);
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
tid_data = &mvm_sta->tid_data[i];
while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
/*
* The first deferred frame should've stopped the MAC
* queues, so we should never get a second deferred
* frame for the RA/TID.
*/
iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
ieee80211_free_txskb(mvm->hw, skb);
}
}
spin_unlock_bh(&mvm_sta->lock);
}
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -2920,7 +3006,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
*/
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST) {
iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
flush_work(&mvm->add_stream_wk);
/*
@ -2967,6 +3052,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
NL80211_TDLS_SETUP);
}
sta->max_rc_amsdu_len = 1;
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
/*
@ -3036,6 +3123,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
NL80211_TDLS_DISABLE_LINK);
}
/* Remove STA key if this is an AP using WEP */
if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
mvmvif->ap_wep_key);
if (!ret)
ret = rm_ret;
}
} else {
ret = -EIO;
}
@ -3431,14 +3528,20 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
/* Set the channel info data */
.channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
PHY_BAND_24 : PHY_BAND_5,
.channel_info.channel = channel->hw_value,
.channel_info.width = PHY_VHT_CHANNEL_MODE20,
/* Set the time and duration */
.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
};
struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm,
&aux_roc_req.channel_info);
u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm);
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
(channel->band == NL80211_BAND_2GHZ) ?
PHY_BAND_24 : PHY_BAND_5,
PHY_VHT_CHANNEL_MODE20,
0);
/* Set the time and duration */
tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg));
delay = AUX_ROC_MIN_DELAY;
req_dur = MSEC_TO_TU(duration);
@ -3463,15 +3566,15 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
}
}
aux_roc_req.duration = cpu_to_le32(req_dur);
aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
tail->duration = cpu_to_le32(req_dur);
tail->apply_time_max_delay = cpu_to_le32(delay);
IWL_DEBUG_TE(mvm,
"ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
channel->hw_value, req_dur, duration, delay,
dtim_interval);
/* Set the node address */
memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
memcpy(tail->node_addr, vif->addr, ETH_ALEN);
lockdep_assert_held(&mvm->mutex);
@ -3502,7 +3605,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_rx_aux_roc, te_data);
res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len,
&aux_roc_req);
if (res) {
@ -4656,8 +4759,35 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
mutex_unlock(&mvm->mutex);
}
static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
{
u8 protocol = ip_hdr(skb)->protocol;
if (!IS_ENABLED(CONFIG_INET))
return false;
return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
}
static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
struct sk_buff *head,
struct sk_buff *skb)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
/* For now don't aggregate IPv6 in AMSDU */
if (skb->protocol != htons(ETH_P_IP))
return false;
if (!iwl_mvm_is_csum_supported(mvm))
return true;
return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.start = iwl_mvm_mac_start,
.reconfig_complete = iwl_mvm_mac_reconfig_complete,
@ -4731,6 +4861,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
#endif
.get_survey = iwl_mvm_mac_get_survey,
.sta_statistics = iwl_mvm_mac_sta_statistics,
.can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
#endif


@ -83,7 +83,6 @@
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
#include "tof.h"
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
@ -95,6 +94,8 @@
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
/* A TimeUnit is 1024 microseconds */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
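As a worked example (illustrative, not part of the patch): MSEC_TO_TU(100) expands to 100 * 1000 / 1024 = 97, so a 100 ms request becomes 97 TU, and the integer division always rounds the converted duration down slightly.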
@ -298,18 +299,39 @@ enum iwl_bt_force_ant_mode {
BT_FORCE_ANT_MAX,
};
/**
* enum iwl_mvm_low_latency_force - low latency force mode set by debugfs
* @LOW_LATENCY_FORCE_UNSET: force mode not set
* @LOW_LATENCY_FORCE_ON: force low latency on
* @LOW_LATENCY_FORCE_OFF: force low latency off
* @NUM_LOW_LATENCY_FORCE: number of force modes
*/
enum iwl_mvm_low_latency_force {
LOW_LATENCY_FORCE_UNSET,
LOW_LATENCY_FORCE_ON,
LOW_LATENCY_FORCE_OFF,
NUM_LOW_LATENCY_FORCE
};
/**
* enum iwl_mvm_low_latency_cause - low latency set causes
* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
* @LOW_LATENCY_VCMD: low latency mode set from vendor command
* @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (AP)
* @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicates that force mode is enabled;
* the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE
* @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs;
* set this together with the LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag
* in low_latency.
*/
enum iwl_mvm_low_latency_cause {
LOW_LATENCY_TRAFFIC = BIT(0),
LOW_LATENCY_DEBUGFS = BIT(1),
LOW_LATENCY_VCMD = BIT(2),
LOW_LATENCY_VIF_TYPE = BIT(3),
LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4),
LOW_LATENCY_DEBUGFS_FORCE = BIT(5),
};
/**
@ -360,8 +382,10 @@ struct iwl_probe_resp_data {
* @pm_enabled - Indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* @low_latency: indicates low latency is set, see
* enum &iwl_mvm_low_latency_cause for causes.
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
* derived from the low_latency bit flags and taking force mode into account.
* @ps_disabled: indicates that this interface requires PS to be disabled
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
@ -393,7 +417,8 @@ struct iwl_mvm_vif {
bool ap_ibss_active;
bool pm_enabled;
bool monitor_active;
u8 low_latency;
u8 low_latency: 6;
u8 low_latency_actual: 1;
bool ps_disabled;
struct iwl_mvm_vif_bf_data bf_data;
@ -778,6 +803,39 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
atomic_t tx_request;
bool stopped;
};
static inline struct iwl_mvm_txq *
iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
{
return (void *)txq->drv_priv;
}
static inline struct iwl_mvm_txq *
iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
{
if (tid == IWL_MAX_TID_COUNT)
tid = IEEE80211_NUM_TIDS;
return (void *)sta->txq[tid]->drv_priv;
}
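mac80211 exposes one extra per-station TXQ slot beyond the 16 QoS TIDs for non-QoS/management traffic, while the driver addresses that traffic with its own IWL_MAX_TID_COUNT value; the helper above translates between the two. A minimal illustration (assuming a fully set-up station; not part of the patch):

/* Both lookups resolve to the same driver TXQ for management traffic. */
struct iwl_mvm_txq *a = iwl_mvm_txq_from_tid(sta, IWL_MAX_TID_COUNT);
struct iwl_mvm_txq *b =
	iwl_mvm_txq_from_mac80211(sta->txq[IEEE80211_NUM_TIDS]);
WARN_ON(a != b);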
/**
* struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
*
* @sta_id: sta id
* @txq_tid: txq tid
*/
struct iwl_mvm_tvqm_txq_info {
u8 sta_id;
u8 txq_tid;
};
struct iwl_mvm_dqa_txq_info {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
@ -843,13 +901,13 @@ struct iwl_mvm {
u64 on_time_scan;
} radio_stats, accu_radio_stats;
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
struct list_head add_stream_txqs;
union {
struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
};
struct work_struct add_stream_wk; /* To add streams to queues */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
/* NVM sections */
@ -863,7 +921,6 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
u8 rx_ba_sessions;
/* configured by mac80211 */
@ -932,6 +989,7 @@ struct iwl_mvm {
struct debugfs_blob_wrapper nvm_calib_blob;
struct debugfs_blob_wrapper nvm_prod_blob;
struct debugfs_blob_wrapper nvm_phy_sku_blob;
struct debugfs_blob_wrapper nvm_reg_blob;
struct iwl_mvm_frame_stats drv_rx_stats;
spinlock_t drv_stats_lock;
@ -955,6 +1013,7 @@ struct iwl_mvm {
u8 refs[IWL_MVM_REF_COUNT];
u8 vif_count;
struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
/* -1 for always, 0 for never, >0 for that many times */
s8 fw_restart;
@ -1090,7 +1149,6 @@ struct iwl_mvm {
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
struct iwl_mvm_tof_data tof_data;
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
@ -1106,6 +1164,10 @@ struct iwl_mvm {
/* does a monitor vif exist (only one can exist hence bool) */
bool monitor_on;
/* sniffer data to include in radiotap */
__le16 cur_aid;
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
@ -1150,7 +1212,6 @@ enum iwl_mvm_init_status {
IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2),
IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3),
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@ -1207,6 +1268,19 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
return iwl_mvm_sta_from_mac80211(sta);
}
static inline struct ieee80211_vif *
iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
{
if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac)))
return NULL;
if (rcu)
return rcu_dereference(mvm->vif_id_to_mac[vif_id]);
return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id],
lockdep_is_held(&mvm->mutex));
}
static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
{
return !iwlwifi_mod_params.d0i3_disable &&
@ -1470,6 +1544,11 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
unsigned int tid);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
@ -1599,7 +1678,6 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool force_assoc_off, const u8 *bssid_override);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
@ -1615,8 +1693,6 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@ -1870,17 +1946,43 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
* binding, so this has no real impact. For now, just return
* the current desired low-latency state.
*/
return mvmvif->low_latency;
return mvmvif->low_latency_actual;
}
static inline
void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
enum iwl_mvm_low_latency_cause cause)
{
u8 new_state;
if (set)
mvmvif->low_latency |= cause;
else
mvmvif->low_latency &= ~cause;
/*
* if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is enabled, no changes to the
* actual mode are allowed.
*/
if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE &&
cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE)
return;
if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set)
/*
* We enter force state
*/
new_state = !!(mvmvif->low_latency &
LOW_LATENCY_DEBUGFS_FORCE);
else
/*
* Check if any other one set low latency
*/
new_state = !!(mvmvif->low_latency &
~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE |
LOW_LATENCY_DEBUGFS_FORCE));
mvmvif->low_latency_actual = new_state;
}
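To make the force semantics concrete, here is a hypothetical debugfs-driven call sequence (illustration only, not part of the patch): the forced value is latched first, then force mode is enabled, after which other causes keep updating the bit mask but no longer recompute the actual state.

iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_DEBUGFS_FORCE);
iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
/* low_latency_actual is now 1 and stays 1 ... */
iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_TRAFFIC);
/* ... until force mode is disabled again, which recomputes the state: */
iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_DEBUGFS_FORCE_ENABLE);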
/* Return a bitmask with all the hw supported queues, except for the
@ -1895,6 +1997,16 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
/* If the IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set, we received
* an assert. Since we failed to bring the interface up, mac80211
* will not attempt to reconfigure the device (which is what normally
* handles dump collection in the assert flow), so trigger dump
* collection here.
*/
if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status))
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
/* calling this function without using dump_start/end since at this
* point we already hold the op mode mutex
*/
@ -1906,10 +2018,6 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
}
/* Stop/start all mac queues in a given bitmap */
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
/* Re-configure the SCD for a queue that has already been configured */
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn);
@ -2015,4 +2123,59 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
/* Channel info utils */
static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS);
}
static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci)
{
return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ?
sizeof(struct iwl_fw_channel_info) :
sizeof(struct iwl_fw_channel_info_v1));
}
static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm)
{
return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 :
sizeof(struct iwl_fw_channel_info) -
sizeof(struct iwl_fw_channel_info_v1);
}
static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
u32 chan, u8 band, u8 width,
u8 ctrl_pos)
{
if (iwl_mvm_has_ultra_hb_channel(mvm)) {
ci->channel = cpu_to_le32(chan);
ci->band = band;
ci->width = width;
ci->ctrl_pos = ctrl_pos;
} else {
struct iwl_fw_channel_info_v1 *ci_v1 =
(struct iwl_fw_channel_info_v1 *)ci;
ci_v1->channel = chan;
ci_v1->band = band;
ci_v1->width = width;
ci_v1->ctrl_pos = ctrl_pos;
}
}
static inline void
iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
struct cfg80211_chan_def *chandef)
{
iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
(chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5),
iwl_mvm_get_channel_width(chandef),
iwl_mvm_get_ctrl_pos(chandef));
}
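The helpers above let one command layout serve both firmware generations: a host command embeds the full-size struct iwl_fw_channel_info, older firmware gets the v1 layout with the trailing fields packed immediately after it, and the command length shrinks by the padding. A minimal sketch follows (the command struct, its tail field, and the wrapper function are hypothetical, not part of the patch):

static int hypothetical_send_roc(struct iwl_mvm *mvm,
				 struct cfg80211_chan_def *chandef,
				 u32 duration)
{
	struct hypothetical_roc_cmd {
		struct iwl_fw_channel_info ci;
		__le32 duration;	/* example tail field */
	} __packed cmd = {};
	u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
	__le32 *tail = iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);

	iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
	*tail = cpu_to_le32(duration);	/* lands right after the used ci layout */

	return iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &cmd);
}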
#endif /* __IWL_MVM_H__ */


@ -179,7 +179,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
ret, mvm->cfg->name);
ret = -EIO;
ret = -ENODATA;
}
goto exit;
}
@ -380,8 +380,12 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
size_read);
if (ret < 0)
if (ret == -ENODATA) {
ret = 0;
continue;
}
if (ret < 0)
break;
size_read += ret;
temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
if (!temp) {
@ -412,6 +416,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_phy_sku_blob.data = temp;
mvm->nvm_phy_sku_blob.size = ret;
break;
case NVM_SECTION_TYPE_REGULATORY_SDP:
case NVM_SECTION_TYPE_REGULATORY:
mvm->nvm_reg_blob.data = temp;
mvm->nvm_reg_blob.size = ret;
break;
default:
if (section == mvm->cfg->nvm_hw_section_num) {
mvm->nvm_hw_blob.data = temp;
@ -454,7 +463,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
mvm->nvm_data->nvm_version);
return 0;
return ret < 0 ? ret : 0;
}
struct iwl_mcc_update_resp *


@ -301,8 +301,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
RX_HANDLER_SYNC),
RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
@ -329,8 +327,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(SCAN_REQ_UMAC),
HCMD_NAME(SCAN_ABORT_UMAC),
HCMD_NAME(SCAN_COMPLETE_UMAC),
HCMD_NAME(TOF_CMD),
HCMD_NAME(TOF_NOTIFICATION),
HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
HCMD_NAME(ADD_STA_KEY),
HCMD_NAME(ADD_STA),
@ -449,6 +445,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@ -461,6 +458,22 @@ static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
HCMD_NAME(MFU_ASSERT_DUMP_NTF),
};
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
HCMD_NAME(TOF_RANGE_REQ_CMD),
HCMD_NAME(TOF_CONFIG_CMD),
HCMD_NAME(TOF_RANGE_ABORT_CMD),
HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
HCMD_NAME(TOF_LC_NOTIF),
HCMD_NAME(TOF_RESPONDER_STATS),
HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
};
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
@ -483,6 +496,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
@ -685,6 +699,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);
@ -736,6 +751,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.rx_buf_size = rb_size_default;
}
BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) !=
LDBG_CFG_COMMAND_SIZE);
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
@ -842,8 +860,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_is_d0i3_supported(mvm))
iwl_trans_unref(mvm->trans);
iwl_mvm_tof_init(mvm);
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
return op_mode;
@ -909,8 +925,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
cancel_delayed_work_sync(&mvm->tcm.work);
iwl_mvm_tof_clean(mvm);
iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
mutex_destroy(&mvm->d0i3_suspend_mutex);
@ -1079,24 +1093,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
int q;
if (WARN_ON_ONCE(!mq))
return;
for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
IWL_DEBUG_TX_QUEUES(mvm,
"mac80211 %d already stopped\n", q);
continue;
}
ieee80211_stop_queue(mvm->hw, q);
}
}
static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
const struct iwl_device_cmd *cmd)
{
@ -1109,38 +1105,66 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
iwl_trans_block_txq_ptrs(mvm->trans, false);
}
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
int hw_queue, bool start)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
struct ieee80211_sta *sta;
struct ieee80211_txq *txq;
struct iwl_mvm_txq *mvmtxq;
int i;
unsigned long tid_bitmap;
struct iwl_mvm_sta *mvmsta;
u8 sta_id;
iwl_mvm_stop_mac_queues(mvm, mq);
}
sta_id = iwl_mvm_has_new_tx_api(mvm) ?
mvm->tvqm_info[hw_queue].sta_id :
mvm->queue_info[hw_queue].ra_sta_id;
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
int q;
if (WARN_ON_ONCE(!mq))
if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
return;
for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
IWL_DEBUG_TX_QUEUES(mvm,
"mac80211 %d still stopped\n", q);
continue;
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR_OR_NULL(sta))
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (iwl_mvm_has_new_tx_api(mvm)) {
int tid = mvm->tvqm_info[hw_queue].txq_tid;
tid_bitmap = BIT(tid);
} else {
tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
}
ieee80211_wake_queue(mvm->hw, q);
for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int tid = i;
if (tid == IWL_MAX_TID_COUNT)
tid = IEEE80211_NUM_TIDS;
txq = sta->txq[tid];
mvmtxq = iwl_mvm_txq_from_mac80211(txq);
mvmtxq->stopped = !start;
if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
}
out:
rcu_read_unlock();
}
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
iwl_mvm_queue_state_change(op_mode, hw_queue, false);
}
static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
iwl_mvm_start_mac_queues(mvm, mq);
iwl_mvm_queue_state_change(op_mode, hw_queue, true);
}
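For context, iwl_mvm_mac_itxq_xmit() itself is not part of this hunk; below is a minimal sketch of how a driver typically drains a mac80211 TXQ while honoring the stopped flag set above (the helper name and exact checks are assumptions for illustration, not the patch's implementation):

static void example_itxq_drain(struct iwl_mvm *mvm, struct ieee80211_txq *txq)
{
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	struct sk_buff *skb;

	/* Do nothing while redirection has the queue stopped or before
	 * the hardware queue has been allocated.
	 */
	if (mvmtxq->stopped || mvmtxq->txq_id == IWL_MVM_INVALID_QUEUE)
		return;

	while ((skb = ieee80211_tx_dequeue(mvm->hw, txq)))
		if (iwl_mvm_tx_skb(mvm, skb, txq->sta))
			ieee80211_free_txskb(mvm->hw, skb);
}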
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)


@ -143,14 +143,11 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
u8 chains_static, u8 chains_dynamic)
{
u8 active_cnt, idle_cnt;
struct iwl_phy_context_cmd_tail *tail =
iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
/* Set the channel info data */
cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5);
cmd->ci.channel = chandef->chan->hw_value;
cmd->ci.width = iwl_mvm_get_channel_width(chandef);
cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* Set rx the chains */
idle_cnt = chains_static;
@ -168,17 +165,17 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
active_cnt = 2;
}
cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= cpu_to_le32(active_cnt <<
tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
tail->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (unlikely(mvm->dbgfs_rx_phyinfo))
cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
#endif
cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
/*
@ -195,6 +192,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
{
struct iwl_phy_context_cmd cmd;
int ret;
u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
/* Set the command header fields */
iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
@ -203,9 +201,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
chains_static, chains_dynamic);
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
sizeof(struct iwl_phy_context_cmd),
&cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
return ret;


@ -149,14 +149,9 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
if (he_cap && he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) {
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
if (he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2)
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK;
}
return flags;
}
@ -320,12 +315,26 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
if (WARN_ON(sta->max_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled & BIT(i))
sta->max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
sta->max_tid_amsdu_len[i] = 1;
}
IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",


@ -1744,6 +1744,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum rs_action scale_action)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
@ -1757,6 +1758,19 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvmsta->amsdu_enabled = 0xFFFF;
mvmsta->max_amsdu_len = sta->max_amsdu_len;
sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled)
sta->max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
sta->max_tid_amsdu_len[i] = 1;
}
}
/*
@ -3332,12 +3346,12 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
/* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI
* column the rate table should look like this:
*
* rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
* rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
* rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI


@ -599,8 +599,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
if (iwl_mvm_is_cdb_supported(mvm)) {
struct mvm_statistics_general_cdb *general =
if (iwl_mvm_has_new_rx_stats_api(mvm)) {
struct mvm_statistics_general *general =
data->general;
mvmvif->beacon_stats.num_beacons =
@ -723,7 +723,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
} else {
expected_size = sizeof(struct iwl_notif_statistics_cdb);
expected_size = sizeof(struct iwl_notif_statistics);
}
if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
@ -753,7 +753,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
flags = stats->flag;
} else {
struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
struct iwl_notif_statistics *stats = (void *)&pkt->data;
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
@ -792,7 +792,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
struct iwl_notif_statistics *stats = (void *)&pkt->data;
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;


@ -192,28 +192,49 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
}
}
static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_vendor_radiotap *radiotap;
int size = sizeof(*radiotap) + sizeof(__le16);
if (!mvm->cur_aid)
return;
radiotap = skb_put(skb, size);
radiotap->align = 1;
/* Intel OUI */
radiotap->oui[0] = 0xf6;
radiotap->oui[1] = 0x54;
radiotap->oui[2] = 0x25;
/* radiotap sniffer config sub-namespace */
radiotap->subns = 1;
radiotap->present = 0x1;
radiotap->len = size - sizeof(*radiotap);
radiotap->pad = 0;
/* fill the data now */
memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
}
/* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
struct ieee80211_sta *sta)
struct ieee80211_sta *sta,
bool csi)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
if (!(rx_status->flag & RX_FLAG_NO_PSDU) &&
iwl_mvm_check_pn(mvm, skb, queue, sta)) {
iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
} else {
unsigned int radiotap_len = 0;
if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
radiotap_len += sizeof(struct ieee80211_radiotap_he);
if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
__skb_push(skb, radiotap_len);
else
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
struct ieee80211_rx_status *rx_status,
@ -473,7 +494,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
sta);
sta, false);
reorder_buf->num_stored--;
}
}
@ -666,6 +687,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* This also covers the case of receiving a Block Ack Request
* outside a BA session; we'll pass it to mac80211 and that
* then sends a delBA action frame.
* This also covers pure monitor mode, in which case we won't
* have any BA sessions.
*/
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
@ -1158,7 +1181,6 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
/* temporarily hide the radiotap data */
__skb_pull(skb, radiotap_len);
if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_SU) {
/* report the AMPDU-EOF bit on single frames */
if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
@ -1166,7 +1188,6 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
}
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
@ -1178,9 +1199,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
/* toggle is switched whenever new aggregation starts */
if (toggle_bit != mvm->ampdu_toggle &&
(he_type == RATE_MCS_HE_TYPE_MU ||
he_type == RATE_MCS_HE_TYPE_SU)) {
if (toggle_bit != mvm->ampdu_toggle) {
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
@ -1314,6 +1333,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
.d4 = desc->phy_data4,
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
};
bool csi = false;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@ -1412,7 +1432,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* set the preamble flag if appropriate */
if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
if (rate_n_flags & RATE_MCS_CCK_MSK &&
phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
@ -1441,14 +1462,23 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->ampdu_reference = mvm->ampdu_ref;
/* toggle is switched whenever new aggregation starts */
/*
* Toggle is switched whenever new aggregation starts. Make
* sure ampdu_reference is never 0 so we can later use it to
* see if the frame was really part of an A-MPDU or not.
*/
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
if (mvm->ampdu_ref == 0)
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
}
rx_status->ampdu_reference = mvm->ampdu_ref;
}
if (unlikely(mvm->monitor_on))
iwl_mvm_add_rtap_sniffer_config(mvm, skb);
rcu_read_lock();
if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
@ -1602,7 +1632,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
sta, csi);
out:
rcu_read_unlock();
}
@ -1705,15 +1736,24 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
} else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
/*
* take the nss from the rx_vec since the rate_n_flags has
* only 2 bits for the nss which gives a max of 4 ss but
* there may be up to 8 spatial streams
*/
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
} else if (rate_n_flags & RATE_MCS_HE_MSK) {
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
} else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
@ -1726,7 +1766,7 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->rate_idx = rate;
}
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, false);
out:
rcu_read_unlock();
}


@ -7,6 +7,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,6 +29,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -64,7 +66,7 @@ struct iwl_mvm_active_iface_iterator_data {
struct ieee80211_vif *ignore_vif;
u8 sta_vif_ap_sta_id;
enum iwl_sf_state sta_vif_state;
int num_active_macs;
u32 num_active_macs;
};
/*


@ -356,24 +356,16 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
return ret;
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
int mac80211_queue, u8 tid, u8 flags)
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int queue, u8 tid, u8 flags)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
int ret;
if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
return -EINVAL;
if (iwl_mvm_has_new_tx_api(mvm)) {
if (remove_mac_queue)
mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);
iwl_trans_txq_free(mvm->trans, queue);
return 0;
@ -384,36 +376,15 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
/*
* If there is another TID with the same AC - don't remove the MAC queue
* from the mapping
*/
if (tid < IWL_MAX_TID_COUNT) {
unsigned long tid_bitmap =
mvm->queue_info[queue].tid_bitmap;
int ac = tid_to_mac80211_ac[tid];
int i;
for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
if (tid_to_mac80211_ac[i] == ac)
remove_mac_queue = false;
}
}
if (remove_mac_queue)
mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);
cmd.action = mvm->queue_info[queue].tid_bitmap ?
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
"Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
"Disabling TXQ #%d tids=0x%x\n",
queue,
mvm->queue_info[queue].tid_bitmap,
mvm->hw_queue_to_mac80211[queue]);
mvm->queue_info[queue].tid_bitmap);
/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE)
@ -423,15 +394,19 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
cmd.tid = mvm->queue_info[queue].txq_tid;
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].tid_bitmap ||
mvm->hw_queue_to_mac80211[queue],
"TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
queue, mvm->hw_queue_to_mac80211[queue],
mvm->queue_info[queue].tid_bitmap);
WARN(mvm->queue_info[queue].tid_bitmap,
"TXQ #%d info out-of-sync - tids=0x%x\n",
queue, mvm->queue_info[queue].tid_bitmap);
/* If we are here - the queue is freed and we can zero out these vals */
mvm->queue_info[queue].tid_bitmap = 0;
mvm->hw_queue_to_mac80211[queue] = 0;
if (sta) {
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
}
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
@ -517,9 +492,14 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
spin_lock_bh(&mvmsta->lock);
/* Unmap MAC queues and TIDs from this queue */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
disable_agg_tids |= BIT(tid);
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@ -541,10 +521,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
struct ieee80211_sta *old_sta,
u8 new_sta_id)
{
struct iwl_mvm_sta *mvmsta;
u8 txq_curr_ac, sta_id, tid;
u8 sta_id, tid;
unsigned long disable_agg_tids = 0;
bool same_sta;
int ret;
@ -554,7 +535,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
tid = mvm->queue_info[queue].txq_tid;
@ -570,9 +550,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
ret = iwl_mvm_disable_txq(mvm, queue,
mvmsta->vif->hw_queue[txq_curr_ac],
tid, 0);
ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@ -662,16 +640,15 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true.
*/
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
int ac, int ssn, unsigned int wdg_timeout,
bool force)
bool force, struct iwl_mvm_txq *txq)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool shared_queue;
unsigned long mq;
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@ -695,14 +672,14 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop MAC queues and wait for this queue to empty */
iwl_mvm_stop_mac_queues(mvm, mq);
/* Stop the queue and wait for it to empty */
txq->stopped = true;
ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
@ -743,8 +720,8 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
out:
/* Continue using the MAC queues */
iwl_mvm_start_mac_queues(mvm, mq);
/* Continue using the queue */
txq->stopped = false;
return ret;
}
@ -769,7 +746,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
return -ENOSPC;
}
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
@ -792,10 +769,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d (mac80211 map:0x%x)\n",
queue, mvm->hw_queue_to_mac80211[queue]);
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
return queue;
}
@ -805,9 +779,10 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
lockdep_assert_held(&mvm->mutex);
@ -815,11 +790,16 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating queue for sta %d on tid %d\n",
mvmsta->sta_id, tid);
queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
wdg_timeout);
queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
if (queue < 0)
return queue;
if (sta) {
mvmtxq->txq_id = queue;
mvm->tvqm_info[queue].txq_tid = tid;
mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
}
IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
spin_lock_bh(&mvmsta->lock);
@ -829,8 +809,9 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
return 0;
}
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
int mac80211_queue, u8 sta_id, u8 tid)
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
int queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;
@ -845,14 +826,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
if (mvm->queue_info[queue].tid_bitmap)
enable_queue = false;
if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
WARN(mac80211_queue >=
BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
"cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
mac80211_queue, queue, sta_id, tid);
mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
}
mvm->queue_info[queue].tid_bitmap |= BIT(tid);
mvm->queue_info[queue].ra_sta_id = sta_id;
@ -866,16 +839,22 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
mvm->queue_info[queue].txq_tid = tid;
}
if (sta) {
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
mvmtxq->txq_id = queue;
}
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
queue, mvm->queue_info[queue].tid_bitmap,
mvm->hw_queue_to_mac80211[queue]);
"Enabling TXQ #%d tids=0x%x\n",
queue, mvm->queue_info[queue].tid_bitmap);
return enable_queue;
}
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
int mac80211_queue, u16 ssn,
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
@ -895,8 +874,7 @@ static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
return false;
/* Send the enabling command if we need to */
if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
cfg->sta_id, cfg->tid))
if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
return false;
inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
@ -989,9 +967,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
ret = iwl_mvm_redirect_queue(mvm, queue, tid,
tid_to_mac80211_ac[tid], ssn,
wdg_timeout, true);
wdg_timeout, true,
iwl_mvm_txq_from_tid(sta, tid));
if (ret) {
IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
return;
@ -1068,11 +1047,9 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
u16 tid_bitmap;
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@ -1105,10 +1082,6 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* sure all TIDs have existing corresponding mac queues enabled
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
mvm->hw_queue_to_mac80211[queue] |=
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
/* If the queue is marked as shared - "unshare" it */
if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
@ -1136,6 +1109,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
unsigned long unshare_queues = 0;
unsigned long changetid_queues = 0;
int i, ret, free_queue = -ENOSPC;
struct ieee80211_sta *queue_owner = NULL;
lockdep_assert_held(&mvm->mutex);
@ -1201,13 +1175,14 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
inactive_tid_bitmap,
&unshare_queues,
&changetid_queues);
if (ret >= 0 && free_queue < 0)
if (ret >= 0 && free_queue < 0) {
queue_owner = sta;
free_queue = ret;
}
/* only unlock sta lock - we still need the queue info lock */
spin_unlock_bh(&mvmsta->lock);
}
rcu_read_unlock();
/* Reconfigure queues requiring reconfiguration */
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
@ -1216,18 +1191,21 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
iwl_mvm_change_queue_tid(mvm, i);
if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
alloc_for_sta);
if (ret)
if (ret) {
rcu_read_unlock();
return ret;
}
}
rcu_read_unlock();
return free_queue;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
struct ieee80211_sta *sta, u8 ac, int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_trans_txq_scd_cfg cfg = {
@ -1238,7 +1216,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
};
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
@ -1257,12 +1234,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
spin_unlock_bh(&mvmsta->lock);
/*
* Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
* exists
*/
if (!ieee80211_is_data_qos(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
if (tid == IWL_MAX_TID_COUNT) {
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_MGMT_QUEUE,
IWL_MVM_DQA_MAX_MGMT_QUEUE);
@ -1341,8 +1313,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
}
inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
ssn, &cfg, wdg_timeout);
inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
/*
* Mark queue as shared in transport if shared
@ -1384,8 +1355,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
} else {
/* Redirect queue, if needed */
ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
wdg_timeout, false);
ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
wdg_timeout, false,
iwl_mvm_txq_from_tid(sta, tid));
if (ret)
goto out_err;
}
@ -1393,7 +1365,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return 0;
out_err:
iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
return ret;
}
@ -1406,87 +1378,32 @@ static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
return tid_to_mac80211_ac[tid];
}
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
struct sk_buff_head deferred_tx;
u8 mac_queue;
bool no_queue = false; /* Marks if there is a problem with the queue */
u8 ac;
lockdep_assert_held(&mvm->mutex);
skb = skb_peek(&tid_data->deferred_tx_frames);
if (!skb)
return;
hdr = (void *)skb->data;
ac = iwl_mvm_tid_to_ac_queue(tid);
mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
IWL_ERR(mvm,
"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
mvmsta->sta_id, tid);
/*
* Mark queue as problematic so later the deferred traffic is
* freed, as we can do nothing with it
*/
no_queue = true;
}
__skb_queue_head_init(&deferred_tx);
/* Disable bottom-halves when entering TX path */
local_bh_disable();
spin_lock(&mvmsta->lock);
skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
spin_unlock(&mvmsta->lock);
while ((skb = __skb_dequeue(&deferred_tx)))
if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
ieee80211_free_txskb(mvm->hw, skb);
local_bh_enable();
/* Wake queue */
iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
add_stream_wk);
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long deferred_tid_traffic;
int sta_id, tid;
mutex_lock(&mvm->mutex);
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
clear_bit(sta_id, mvm->sta_deferred_frames);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
continue;
while (!list_empty(&mvm->add_stream_txqs)) {
struct iwl_mvm_txq *mvmtxq;
struct ieee80211_txq *txq;
u8 tid;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
mvmtxq = list_first_entry(&mvm->add_stream_txqs,
struct iwl_mvm_txq, list);
for_each_set_bit(tid, &deferred_tid_traffic,
IWL_MAX_TID_COUNT + 1)
iwl_mvm_tx_deferred_stream(mvm, sta, tid);
txq = container_of((void *)mvmtxq, struct ieee80211_txq,
drv_priv);
tid = txq->tid;
if (tid == IEEE80211_NUM_TIDS)
tid = IWL_MAX_TID_COUNT;
iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
list_del_init(&mvmtxq->list);
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
}
mutex_unlock(&mvm->mutex);
@ -1542,9 +1459,10 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
* Note that re-enabling aggregations isn't done in this function.
*/
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta)
struct ieee80211_sta *sta)
{
unsigned int wdg_timeout =
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
unsigned int wdg =
iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
int i;
struct iwl_trans_txq_scd_cfg cfg = {
@ -1561,23 +1479,18 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
int txq_id = tid_data->txq_id;
int ac;
u8 mac_queue;
if (txq_id == IWL_MVM_INVALID_QUEUE)
continue;
skb_queue_head_init(&tid_data->deferred_tx_frames);
ac = tid_to_mac80211_ac[i];
mac_queue = mvm_sta->vif->hw_queue[ac];
if (iwl_mvm_has_new_tx_api(mvm)) {
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d\n",
mvm_sta->sta_id, i);
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
mvm_sta->sta_id,
i, wdg_timeout);
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
i, wdg);
tid_data->txq_id = txq_id;
/*
@ -1600,8 +1513,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);
iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
wdg_timeout);
iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
}
@ -1691,7 +1603,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (ret)
goto err;
iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
iwl_mvm_realloc_queues_after_restart(mvm, sta);
sta_update = true;
sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
goto update_fw;
@ -1724,9 +1636,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
* frames until the queue is allocated
*/
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
}
mvm_sta->deferred_traffic_tid_map = 0;
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_mac80211(sta->txq[i]);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
INIT_LIST_HEAD(&mvmtxq->list);
atomic_set(&mvmtxq->tx_request, 0);
}
mvm_sta->agg_tids = 0;
if (iwl_mvm_has_new_rx_api(mvm) &&
@ -1861,9 +1781,9 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvm_sta)
struct ieee80211_sta *sta)
{
int ac;
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int i;
lockdep_assert_held(&mvm->mutex);
@ -1872,11 +1792,17 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
ac = iwl_mvm_tid_to_ac_queue(i);
iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
vif->hw_queue[ac], i, 0);
iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
0);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_mac80211(sta->txq[i]);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
@ -1938,7 +1864,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
iwl_mvm_disable_sta_queues(mvm, vif, sta);
/* If there is a TXQ still marked as reserved - free it */
if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
@ -2044,7 +1970,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
if (iwl_mvm_has_new_tx_api(mvm)) {
int tvqm_queue =
iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
iwl_mvm_tvqm_enable_txq(mvm, sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
*queue = tvqm_queue;
@ -2057,7 +1983,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
.frame_limit = IWL_FRAME_LIMIT,
};
iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
}
}
@ -2135,8 +2061,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
IWL_MAX_TID_COUNT, 0);
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@ -2195,8 +2120,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
bsta->tfd_queue_msk |= BIT(queue);
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
&cfg, wdg_timeout);
iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
@ -2215,8 +2139,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
bsta->sta_id,
queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
@ -2254,7 +2177,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
return;
}
iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
if (iwl_mvm_has_new_tx_api(mvm))
return;
@ -2377,10 +2300,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* Note that this is done here as we want to avoid making DQA
* changes in mac80211 layer.
*/
if (vif->type == NL80211_IFTYPE_ADHOC) {
vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
mvmvif->cab_queue = vif->cab_queue;
}
if (vif->type == NL80211_IFTYPE_ADHOC)
mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
/*
* While in previous FWs we had to exclude cab queue from TFD queue
@ -2388,9 +2309,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
*/
if (!iwl_mvm_has_new_tx_api(mvm) &&
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, timeout);
msta->tfd_queue_msk |= BIT(vif->cab_queue);
iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
timeout);
msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
}
ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
mvmvif->id, mvmvif->color);
@ -2407,15 +2328,14 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* tfd_queue_mask.
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
msta->sta_id,
int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
0,
timeout);
mvmvif->cab_queue = queue;
} else if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, timeout);
iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
timeout);
if (mvmvif->ap_wep_key) {
u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
@ -2446,8 +2366,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
0, 0);
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@ -2781,7 +2700,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data;
u16 normalized_ssn;
int txq_id;
u16 txq_id;
int ret;
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
@ -2823,17 +2742,24 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
txq_id = mvmsta->tid_data[tid].txq_id;
if (txq_id == IWL_MVM_INVALID_QUEUE) {
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (txq_id < 0) {
ret = txq_id;
if (ret < 0) {
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto out;
}
txq_id = ret;
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
ret = -ENXIO;
IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
tid, IWL_MAX_HW_QUEUES - 1);
goto out;
} else if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
@ -2976,8 +2902,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
if (alloc_queue)
iwl_mvm_enable_txq(mvm, queue,
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
iwl_mvm_enable_txq(mvm, sta, queue, ssn,
&cfg, wdg_timeout);
/* Send ADD_STA command to enable aggs only if the queue isn't shared */


@ -297,7 +297,6 @@ enum iwl_mvm_agg_state {
/**
* struct iwl_mvm_tid_data - holds the states for each RA / TID
* @deferred_tx_frames: deferred TX frames for this RA/TID
* @seq_number: the next WiFi sequence number to use
* @next_reclaimed: the WiFi sequence number of the next packet to be acked.
* This is basically (last acked packet++).
@ -318,7 +317,6 @@ enum iwl_mvm_agg_state {
* tpt_meas_start
*/
struct iwl_mvm_tid_data {
struct sk_buff_head deferred_tx_frames;
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */
@ -427,8 +425,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
u16 deferred_traffic_tid_map;
u8 reserved_queue;
/* Temporary, until the new TLC will control the Tx protection */


@ -399,6 +399,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct iwl_tdls_channel_switch_cmd cmd = {0};
struct iwl_tdls_channel_switch_cmd_tail *tail =
iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
int ret;
lockdep_assert_held(&mvm->mutex);
@ -414,9 +417,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
cmd.switch_type = type;
cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
cmd.timing.switch_time = cpu_to_le32(switch_time);
cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
tail->timing.frame_timestamp = cpu_to_le32(timestamp);
tail->timing.switch_time = cpu_to_le32(switch_time);
tail->timing.switch_timeout = cpu_to_le32(switch_timeout);
rcu_read_lock();
sta = ieee80211_find_sta(vif, peer);
@ -448,21 +451,16 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
}
if (chandef) {
cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5);
cmd.ci.channel = chandef->chan->hw_value;
cmd.ci.width = iwl_mvm_get_channel_width(chandef);
cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
}
if (chandef)
iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
/* keep quota calculation simple for now - 50% of DTIM for TDLS */
cmd.timing.max_offchan_duration =
tail->timing.max_offchan_duration =
cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int) / 2);
/* Switch time is the first element in the switch-timing IE. */
cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
info = IEEE80211_SKB_CB(skb);
hdr = (void *)skb->data;
@ -472,20 +470,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
ret = -EINVAL;
goto out;
}
iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
}
iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
mvmsta->sta_id);
iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
hdr->frame_control);
rcu_read_unlock();
memcpy(cmd.frame.data, skb->data, skb->len);
memcpy(tail->frame.data, skb->data, skb->len);
ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
sizeof(cmd), &cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
ret);


@ -334,6 +334,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
@ -686,6 +687,8 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data)
{
struct iwl_hs20_roc_req aux_cmd = {};
u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
u32 uid;
int ret;
@ -699,7 +702,7 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
le32_to_cpu(aux_cmd.event_unique_id));
ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
sizeof(aux_cmd), &aux_cmd);
len, &aux_cmd);
if (WARN_ON(ret))
return;


@ -1,305 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "mvm.h"
#include "fw/api/tof.h"
#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
void iwl_mvm_tof_init(struct iwl_mvm *mvm)
{
struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
return;
memset(tof_data, 0, sizeof(*tof_data));
tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (IWL_MVM_TOF_IS_RESPONDER) {
tof_data->responder_cfg.sub_grp_cmd_id =
cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
}
#endif
tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
tof_data->range_req.req_timeout = 1;
tof_data->range_req.initiator = 1;
tof_data->range_req.report_policy = 3;
tof_data->range_req_ext.sub_grp_cmd_id =
cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
}
void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
{
struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
!(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE))
return;
memset(tof_data, 0, sizeof(*tof_data));
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
}
static void iwl_tof_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
bool *enabled = _data;
/* non bss vif exists */
if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
*enabled = false;
}
int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
{
struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
bool enabled;
lockdep_assert_held(&mvm->mutex);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
return -EINVAL;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_tof_iterator, &enabled);
if (!enabled) {
IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
return -EINVAL;
}
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(*cmd), cmd);
}
int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
{
struct iwl_tof_range_abort_cmd cmd = {
.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
.request_id = id,
};
lockdep_assert_held(&mvm->mutex);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
return -EINVAL;
if (id != mvm->tof_data.active_range_request) {
IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
id, mvm->tof_data.active_range_request);
return -EINVAL;
}
/* after abort is sent there's no active request anymore */
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(cmd), &cmd);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
return -EINVAL;
if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
!mvmvif->ap_ibss_active) {
IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
return -EIO;
}
cmd->sta_id = mvmvif->bcast_sta.sta_id;
memcpy(cmd->bssid, vif->addr, ETH_ALEN);
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(*cmd), cmd);
}
#endif
int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.len = { sizeof(mvm->tof_data.range_req), },
/* no copy because of the command size */
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
lockdep_assert_held(&mvm->mutex);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
return -EINVAL;
if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
return -EIO;
}
/* nesting of range requests is not supported in FW */
if (mvm->tof_data.active_range_request !=
IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
mvm->tof_data.active_range_request);
return -EIO;
}
mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
cmd.data[0] = &mvm->tof_data.range_req;
return iwl_mvm_send_cmd(mvm, &cmd);
}
int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
lockdep_assert_held(&mvm->mutex);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
return -EINVAL;
if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
return -EIO;
}
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(mvm->tof_data.range_req_ext),
&mvm->tof_data.range_req_ext);
}
static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
{
struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
if (resp->request_id != mvm->tof_data.active_range_request) {
IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
resp->request_id, mvm->tof_data.active_range_request);
return -EIO;
}
memcpy(&mvm->tof_data.range_resp, resp,
sizeof(struct iwl_tof_range_rsp_ntfy));
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
return 0;
}
static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
{
struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
return 0;
}
static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
{
struct iwl_tof_neighbor_report *report =
(struct iwl_tof_neighbor_report *)data;
IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
report->bssid, report->request_token, report->status);
return 0;
}
void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
lockdep_assert_held(&mvm->mutex);
switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
case TOF_RANGE_RESPONSE_NOTIF:
iwl_mvm_tof_range_resp(mvm, resp->data);
break;
case TOF_MCSI_DEBUG_NOTIF:
iwl_mvm_tof_mcsi_notif(mvm, resp->data);
break;
case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
iwl_mvm_tof_nb_report_notif(mvm, resp->data);
break;
default:
IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
resp->sub_grp_cmd_id);
break;
}
}


@ -1,89 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __tof_h__
#define __tof_h__
#include "fw/api/tof.h"
struct iwl_mvm_tof_data {
struct iwl_tof_config_cmd tof_cfg;
struct iwl_tof_range_req_cmd range_req;
struct iwl_tof_range_req_ext_cmd range_req_ext;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_tof_responder_config_cmd responder_cfg;
#endif
struct iwl_tof_range_rsp_ntfy range_resp;
u8 last_abort_id;
u16 active_range_request;
};
void iwl_mvm_tof_init(struct iwl_mvm *mvm);
void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
#endif
#endif /* __tof_h__ */


@ -533,10 +533,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/*
* For data packets rate info comes from the fw. Only
* set rate/antenna during connection establishment.
* set rate/antenna during connection establishment or in case
* no station is given.
*/
if (sta && (!ieee80211_is_data(hdr->frame_control) ||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
if (!sta || !ieee80211_is_data(hdr->frame_control) ||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
@ -602,11 +603,12 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
}
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info, __le16 fc)
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr)
{
struct iwl_mvm_vif *mvmvif;
mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif);
__le16 fc = hdr->frame_control;
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
@ -625,7 +627,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
(!ieee80211_is_bufferable_mmpdu(fc) ||
ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
return mvm->probe_queue;
if (info->hw_queue == info->control.vif->cab_queue)
if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
is_multicast_ether_addr(hdr->addr1))
return mvmvif->cab_queue;
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
@ -634,8 +638,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
return mvm->p2p_dev_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return mvmvif->cab_queue;
WARN_ON_ONCE(1);
return mvm->p2p_dev_queue;
@ -713,6 +715,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
bool offchannel = IEEE80211_SKB_CB(skb)->flags &
IEEE80211_TX_CTL_TX_OFFCHAN;
int queue = -1;
memcpy(&info, skb->cb, sizeof(info));
@ -720,11 +724,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
return -1;
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
(!info.control.vif ||
info.hw_queue != info.control.vif->cab_queue)))
return -1;
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
@ -737,14 +736,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
else
sta_id = mvmvif->mcast_sta.sta_id;
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
offchannel) {
/*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
* that can be used in 2 different types of vifs, P2P &
@ -758,8 +755,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
}
}
if (queue < 0)
if (queue < 0) {
IWL_ERR(mvm, "No queue was found. Dropping TX\n");
return -1;
}
if (unlikely(ieee80211_is_probe_resp(fc)))
iwl_mvm_probe_resp_set_noa(mvm, skb);
@ -781,6 +780,35 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return 0;
}
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, unsigned int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
u8 ac = tid_to_mac80211_ac[tid];
unsigned int txf;
int lmac = IWL_LMAC_24G_INDEX;
if (iwl_mvm_is_cdb_supported(mvm) &&
band == NL80211_BAND_5GHZ)
lmac = IWL_LMAC_5G_INDEX;
/* For HE redirect to trigger based fifos */
if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
ac += 4;
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
/*
* Don't send an AMSDU that will be longer than the TXF.
* Add a security margin of 256 for the TX command + headers.
* We also want to have the start of the next packet inside the
* fifo to be able to send bursts.
*/
return min_t(unsigned int, mvmsta->max_amsdu_len,
mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}
#ifdef CONFIG_INET
static int
@ -850,36 +878,6 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
return 0;
}
static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
unsigned int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
u8 ac = tid_to_mac80211_ac[tid];
unsigned int txf;
int lmac = IWL_LMAC_24G_INDEX;
if (iwl_mvm_is_cdb_supported(mvm) &&
band == NL80211_BAND_5GHZ)
lmac = IWL_LMAC_5G_INDEX;
/* For HE redirect to trigger based fifos */
if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
ac += 4;
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
/*
* Don't send an AMSDU that will be longer than the TXF.
* Add a security margin of 256 for the TX command + headers.
* We also want to have the start of the next packet inside the
* fifo to be able to send bursts.
*/
return min_t(unsigned int, mvmsta->max_amsdu_len,
mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@ -1002,34 +1000,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
#endif
static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta, u8 tid,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 mac_queue = info->hw_queue;
struct sk_buff_head *deferred_tx_frames;
lockdep_assert_held(&mvm_sta->lock);
mvm_sta->deferred_traffic_tid_map |= BIT(tid);
set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
skb_queue_tail(deferred_tx_frames, skb);
/*
* The first deferred frame should've stopped the MAC queues, so we
* should never get a second deferred frame for the RA/TID.
* In case of GSO the first packet may have been split, so don't warn.
*/
if (skb_queue_len(deferred_tx_frames) == 1) {
iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
schedule_work(&mvm->add_stream_wk);
}
}
/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
@ -1054,7 +1024,12 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
int airtime)
{
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
struct iwl_mvm_tcm_mac *mdata;
if (mac >= NUM_MAC_INDEX_DRIVER)
return;
mdata = &mvm->tcm.data[mac];
if (mvm->tcm.paused)
return;
@ -1065,14 +1040,21 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
mdata->tx.airtime += airtime;
}
static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta, int tid)
{
u32 ac = tid_to_mac80211_ac[tid];
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
struct iwl_mvm_tcm_mac *mdata;
if (mac >= NUM_MAC_INDEX_DRIVER)
return -EINVAL;
mdata = &mvm->tcm.data[mac];
mdata->tx.pkts[ac]++;
return 0;
}
/*
@ -1088,7 +1070,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
u16 txq_id = info->hw_queue;
u16 txq_id;
bool is_ampdu = false;
int hdrlen;
@ -1152,14 +1134,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
* The frame is now deferred, and the worker scheduled
* will re-allocate it, so we can free it for now.
*/
if (WARN_ON_ONCE(txq_id == IWL_MVM_INVALID_QUEUE)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
@ -1199,7 +1174,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
tid == IWL_MAX_TID_COUNT ? 0 : tid))
goto drop;
return 0;


@ -248,7 +248,7 @@ void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
le16_to_cpu(err_resp->bad_cmd_seq_num),
le32_to_cpu(err_resp->error_service));
IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
le64_to_cpu(err_resp->timestamp));
}
@ -463,6 +463,9 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
sizeof(table));
if (table.valid)
mvm->fwrt.dump.umac_err_id = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
@ -486,11 +489,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
u32 val;
u32 val, base = mvm->error_event_table[lmac_num];
if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
if (!base)
@ -541,7 +544,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (table.valid)
mvm->fwrt.dump.rt_status = table.error_id;
mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@ -598,10 +601,10 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
return;
}
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
iwl_mvm_dump_lmac_error_log(mvm, 0);
if (mvm->error_event_table[1])
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
iwl_mvm_dump_lmac_error_log(mvm, 1);
iwl_mvm_dump_umac_error_log(mvm);
}
@ -1133,19 +1136,14 @@ static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
"AP isn't using AMPDU with uAPSD enabled");
}
static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
int *mac_id = data;
if (vif->type != NL80211_IFTYPE_STATION)
return;
if (mvmvif->id != *mac_id)
return;
if (!vif->bss_conf.assoc)
return;
@ -1155,10 +1153,10 @@ static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
!mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
return;
if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected)
if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
return;
mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true;
mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
IWL_INFO(mvm,
"detected AP should do aggregation but isn't, likely due to U-APSD\n");
schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
@ -1171,6 +1169,7 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
u64 tpt;
unsigned long rate;
struct ieee80211_vif *vif;
rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
@ -1199,9 +1198,11 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
return;
}
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_uapsd_agg_disconnect_iter, &mac);
rcu_read_lock();
vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
if (vif)
iwl_mvm_uapsd_agg_disconnect(mvm, vif);
rcu_read_unlock();
}
static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,


@ -513,10 +513,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -531,17 +531,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -556,20 +556,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
@ -593,24 +594,26 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -628,17 +631,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -656,17 +659,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
@ -684,18 +687,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
@ -710,18 +713,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -739,17 +742,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -767,19 +770,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -805,18 +808,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -834,17 +837,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -862,41 +865,88 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
/* 22000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0310, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x02F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0310, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x06F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
{IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
#endif /* CONFIG_IWLMVM */
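Editor's note on the table above: each entry is generated by IWL_PCI_DEVICE(), which pairs an Intel PCI device/subsystem-device ID with the config struct the driver should use. As a hedged sketch only (the *_EXAMPLE names below are hypothetical; the real macro lives in the iwlwifi headers), an entry of that shape could be expressed like this:

#include <linux/pci.h>

/*
 * Illustrative sketch, not copied from the driver: match on Intel's
 * vendor ID, the PCI device ID and the subsystem device ID, and stash
 * a pointer to the chosen config in driver_data for probe() to use.
 */
#define IWL_PCI_DEVICE_EXAMPLE(dev, subdev, cfg) \
        .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
        .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
        .driver_data = (kernel_ulong_t)&(cfg)

/* hypothetical stand-in for a real config such as iwl9560_2ac_160_cfg_soc */
static const struct { const char *name; } example_cfg = { "example" };

static const struct pci_device_id example_ids[] = {
        { IWL_PCI_DEVICE_EXAMPLE(0xA0F0, 0x4034, example_cfg) },
        { /* terminating entry */ }
};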

View File

@ -1,13 +1,15 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
* file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__
@ -1029,8 +1065,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common functions that are used by gen2 transport */

View File

@ -1,13 +1,15 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
* file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
@ -256,6 +292,9 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
}
IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
(u32)rxb->vid, rxq->id, rxq->write);
}
/*
@ -860,30 +899,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
return;
if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
return;
if (!trans->cfg->integrated)
return;
/*
* Turn on the chicken-bits that cause MAC wakeup for RX-related
* values.
* This costs some power, but needed for W/A 9000 integrated A-step
* bug where shadow registers are not in the retention list and their
* value is lost when NIC powers down
*/
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -971,8 +986,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
iwl_pcie_enable_rx_wake(trans, true);
}
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@ -1360,6 +1373,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
if (rxb->invalid)
goto out_err;
IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
@ -1411,11 +1426,12 @@ restart:
emergency = true;
}
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
rxb = iwl_pcie_get_rxb(trans, rxq, i);
if (!rxb)
goto out;
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
i = (i + 1) & (rxq->queue_size - 1);

View File

@ -1530,8 +1530,6 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_init_done));
iwl_pcie_enable_rx_wake(trans, false);
if (reset) {
/*
* reset TX queues -- some of their registers reset during S3
@ -1558,8 +1556,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
iwl_pcie_enable_rx_wake(trans, true);
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,
@ -1968,7 +1964,7 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
struct iwl_trans_pcie_removal *removal =
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
char *prop[] = {"EVENT=INACCESSIBLE", NULL};
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
dev_err(&pdev->dev, "Device gone - attempting removal\n");
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
@ -3118,7 +3114,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
return len;
}
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
if (trans->num_blocks) {
*len += sizeof(struct iwl_fw_error_dump_data) +
@ -3173,8 +3169,7 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0;
u32 monitor_len;
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
!trans->cfg->mq_rx_supported &&
@ -3191,20 +3186,9 @@ static struct iwl_trans_dump_data
cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) {
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
data = (void *)dump_data->data;
len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
return dump_data;
}
/* CSR registers */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
len += sizeof(*data) + IWL_CSR_TO_DUMP;
@ -3569,24 +3553,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
/*
* 9000-series integrated A-step has a problem with suspend/resume
* and sometimes even causes the whole platform to get stuck. This
* workaround makes the hardware not go into the problematic state.
*/
if (trans->cfg->integrated &&
trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)
iwl_set_bit(trans, CSR_HOST_CHICKEN,
CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME);
IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
if (cfg == &iwl22000_2ax_cfg_hr) {
if (cfg == &iwl22560_2ax_cfg_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
trans->cfg = &iwl22000_2ax_cfg_hr;
trans->cfg = &iwl22560_2ax_cfg_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
trans->cfg = &iwl22000_2ax_cfg_jf;
@ -3602,7 +3577,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
(trans->cfg != &iwl22260_2ax_cfg ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);

View File

@ -214,7 +214,11 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
struct iwl_tfh_tb *tb;
if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];
/* Each TFD can point to a maximum max_tbs Tx buffers */
if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
@ -408,7 +412,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
goto out_err;
/* building the A-MSDU might have changed this data, memcpy it now */
memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
return tfd;
out_err:
@ -469,7 +473,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
@ -834,14 +838,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
tb0_size);
/* map first command fragment, if any remains */
if (copy_size > tb0_size) {
phys_addr = dma_map_single(trans->dev,
((u8 *)&out_cmd->hdr) + tb0_size,
(u8 *)out_cmd + tb0_size,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {

View File

@ -1,13 +1,15 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
* file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
@ -2438,8 +2474,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* building the A-MSDU might have changed this data, so memcpy it now */
memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */

View File

@ -708,8 +708,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
goto exit;
priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir);
if (!priv->debugfs_dir)
goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_files); i++) {
files = &debugfs_files[i];
@ -721,8 +719,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
}
priv->events_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir);
if (!priv->events_dir)
goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_events_files); i++) {
files = &debugfs_events_files[i];
@ -734,8 +730,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
}
priv->regs_dir = debugfs_create_dir("registers", priv->debugfs_dir);
if (!priv->regs_dir)
goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_regs_files); i++) {
files = &debugfs_regs_files[i];

View File

@ -797,7 +797,12 @@ static void lbs_persist_config_init(struct net_device *dev)
{
int ret;
ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
if (ret)
pr_err("failed to create boot_opts_group.\n");
ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
if (ret)
pr_err("failed to create mesh_ie_group.\n");
}
static void lbs_persist_config_remove(struct net_device *dev)

View File

@ -9,7 +9,7 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP

View File

@ -922,9 +922,8 @@ mwifiex_reset_write(struct file *file,
}
#define MWIFIEX_DFS_ADD_FILE(name) do { \
if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \
priv, &mwifiex_dfs_##name##_fops)) \
return; \
debugfs_create_file(#name, 0644, priv->dfs_dev_dir, priv, \
&mwifiex_dfs_##name##_fops); \
} while (0);
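Editor's note: this rewrite, like the libertas debugfs hunk earlier in the diff, follows the kernel convention that debugfs creation is best-effort and its return value need not be checked. A minimal illustration of that pattern, with hypothetical names:

#include <linux/debugfs.h>

static struct dentry *example_dir;
static u32 example_counter;

/* Hypothetical init: create the entries and deliberately ignore errors. */
static void example_debugfs_init(void)
{
        example_dir = debugfs_create_dir("example", NULL);
        debugfs_create_u32("counter", 0444, example_dir, &example_counter);
}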
#define MWIFIEX_DFS_FILE_OPS(name) \

View File

@ -489,6 +489,8 @@ static void mwifiex_sdio_coredump(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
/* Device ID for SD8801 */
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
/* Device ID for SD8977 */
#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145)
/* Device ID for SD8997 */
#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
@ -507,6 +509,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977),
.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
@ -2726,4 +2730,5 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
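Editor's note: MODULE_FIRMWARE() records the firmware file name in the module's modinfo so firmware-aware tooling (for example initramfs generators) can pull the blob in alongside the module. A small hypothetical skeleton module declaring the new SD8977 image (file name taken from sdio.h below) would look like this:

#include <linux/module.h>

#define EXAMPLE_SD8977_FW "mrvl/sd8977_uapsta.bin"

/* Hypothetical skeleton: no functionality, just the firmware hint. */
MODULE_FIRMWARE(EXAMPLE_SD8977_FW);
MODULE_DESCRIPTION("example module declaring a firmware dependency");
MODULE_LICENSE("GPL");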

View File

@ -36,6 +36,7 @@
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define BLOCK_MODE 1
@ -371,6 +372,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
0x59, 0x5c, 0x5d},
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = {
.start_rd_port = 0,
.start_wr_port = 0,
.base_0_reg = 0xF8,
.base_1_reg = 0xF9,
.poll_reg = 0x5C,
.host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
.host_int_rsr_reg = 0x4,
.host_int_status_reg = 0x0C,
.host_int_mask_reg = 0x08,
.status_reg_0 = 0xE8,
.status_reg_1 = 0xE9,
.sdio_int_mask = 0xff,
.data_port_mask = 0xffffffff,
.io_port_0_reg = 0xE4,
.io_port_1_reg = 0xE5,
.io_port_2_reg = 0xE6,
.max_mp_regs = 196,
.rd_bitmap_l = 0x10,
.rd_bitmap_u = 0x11,
.rd_bitmap_1l = 0x12,
.rd_bitmap_1u = 0x13,
.wr_bitmap_l = 0x14,
.wr_bitmap_u = 0x15,
.wr_bitmap_1l = 0x16,
.wr_bitmap_1u = 0x17,
.rd_len_p0_l = 0x18,
.rd_len_p0_u = 0x19,
.card_misc_cfg_reg = 0xd8,
.card_cfg_2_1_reg = 0xd9,
.cmd_rd_len_0 = 0xc0,
.cmd_rd_len_1 = 0xc1,
.cmd_rd_len_2 = 0xc2,
.cmd_rd_len_3 = 0xc3,
.cmd_cfg_0 = 0xc4,
.cmd_cfg_1 = 0xc5,
.cmd_cfg_2 = 0xc6,
.cmd_cfg_3 = 0xc7,
.fw_dump_host_ready = 0xcc,
.fw_dump_ctrl = 0xf0,
.fw_dump_start = 0xf1,
.fw_dump_end = 0xf8,
.func1_dump_reg_start = 0x10,
.func1_dump_reg_end = 0x17,
.func1_scratch_reg = 0xe8,
.func1_spec_reg_num = 13,
.func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
0x60, 0x61, 0x62, 0x64,
0x65, 0x66, 0x68, 0x69,
0x6a},
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
.start_rd_port = 0,
.start_wr_port = 0,
@ -532,6 +586,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.can_ext_scan = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
.firmware = SD8977_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd8977,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
.supports_sdio_new_mode = true,
.has_control_mask = false,
.can_dump_fw = true,
.fw_dump_enh = true,
.can_auto_tdls = false,
.can_ext_scan = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.firmware = SD8997_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd8997,

View File

@ -300,7 +300,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
goto unmap;
return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
unmap:
ret = -ENOMEM;
@ -318,7 +318,7 @@ free:
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
dma_addr_t addr;
void *buf;
@ -392,7 +392,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
mt76_dma_rx_fill(dev, q, false);
mt76_dma_rx_fill(dev, q);
}
static void
@ -417,10 +417,9 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
int len, data_len, done = 0;
struct sk_buff *skb;
unsigned char *data;
int len;
int done = 0;
bool more;
while (done < budget) {
@ -430,6 +429,19 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (!data)
break;
if (q->rx_head)
data_len = q->buf_size;
else
data_len = SKB_WITH_OVERHEAD(q->buf_size);
if (data_len < len + q->buf_offset) {
dev_kfree_skb(q->rx_head);
q->rx_head = NULL;
skb_free_frag(data);
continue;
}
if (q->rx_head) {
mt76_add_fragment(dev, q, data, len, more);
continue;
@ -440,12 +452,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
skb_free_frag(data);
continue;
}
skb_reserve(skb, q->buf_offset);
if (skb->tail + len > skb->end) {
dev_kfree_skb(skb);
continue;
}
if (q == &dev->q_rx[MT_RXQ_MCU]) {
u32 *rxfce = (u32 *) skb->cb;
@ -463,7 +470,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
mt76_dma_rx_fill(dev, q, true);
mt76_dma_rx_fill(dev, q);
return done;
}
@ -504,7 +511,7 @@ mt76_dma_init(struct mt76_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
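Editor's note on the rx-processing hunk above: a fragment is now rejected when it cannot fit in the usable part of the receive buffer, where "usable" is the full buf_size when continuing a frame already started in q->rx_head and SKB_WITH_OVERHEAD(buf_size) otherwise. A hedged restatement of that check as a standalone helper (hypothetical name, not driver code):

#include <linux/skbuff.h>

/* Illustrative helper only; mirrors the length check added above. */
static bool example_rx_frag_fits(unsigned int buf_size, unsigned int buf_offset,
                                 unsigned int len, bool is_continuation)
{
        unsigned int usable = is_continuation ? buf_size
                                              : SKB_WITH_OVERHEAD(buf_size);

        return len + buf_offset <= usable;
}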

View File

@ -328,6 +328,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@ -547,7 +548,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
}
static void
mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@ -566,6 +567,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);
if (status->signal <= 0)
ewma_signal_add(&wcid->rssi, -status->signal);
wcid->inactive_count = 0;
if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
return;
@ -625,7 +631,7 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
__skb_queue_head_init(&frames);
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
mt76_check_ps(dev, skb);
mt76_check_sta(dev, skb);
mt76_rx_aggr_reorder(skb, &frames);
}
@ -659,6 +665,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_txq_init(dev, sta->txq[i]);
}
ewma_signal_init(&wcid->rssi);
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
out:
@ -709,3 +716,60 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
{
struct mt76_dev *dev = hw->priv;
int n_chains = __sw_hweight8(dev->antenna_mask);
*dbm = dev->txpower_cur / 2;
/* convert from per-chain power to combined
* output on 2x2 devices
*/
if (n_chains > 1)
*dbm += 3;
return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
if (vif->csa_active && ieee80211_csa_is_complete(vif))
ieee80211_csa_finish(vif);
}
void mt76_csa_finish(struct mt76_dev *dev)
{
if (!dev->csa_complete)
return;
ieee80211_iterate_active_interfaces_atomic(dev->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
__mt76_csa_finish, dev);
dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt76_dev *dev = priv;
if (!vif->csa_active)
return;
dev->csa_complete |= ieee80211_csa_is_complete(vif);
}
void mt76_csa_check(struct mt76_dev *dev)
{
ieee80211_iterate_active_interfaces_atomic(dev->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
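Editor's note on the mt76_get_txpower() helper added earlier in this file: dev->txpower_cur is apparently kept in half-dB steps (hence the division by two), and combining two identical chains doubles the radiated power, i.e. adds 10*log10(2) ≈ 3 dB, which is where the "+3" for 2x2 devices comes from. A tiny userspace sketch of that arithmetic (illustrative only, not driver code):

#include <math.h>
#include <stdio.h>

int main(void)
{
        int per_chain_dbm = 20;  /* hypothetical per-chain output power */
        int n_chains = 2;
        double combined_dbm = per_chain_dbm + 10.0 * log10(n_chains);

        /* prints roughly 23.0 dBm, matching the +3 dB in mt76_get_txpower() */
        printf("combined: %.1f dBm\n", combined_dbm);
        return 0;
}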

View File

@ -23,6 +23,7 @@
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
@ -174,6 +175,8 @@ enum mt76_wcid_flags {
#define MT76_N_WCIDS 128
DECLARE_EWMA(signal, 10, 8);
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
@ -181,6 +184,9 @@ struct mt76_wcid {
unsigned long flags;
struct ewma_signal rssi;
int inactive_count;
u8 idx;
u8 hw_key_idx;
@ -239,7 +245,9 @@ struct mt76_rx_tid {
#define MT_TX_CB_TXS_FAILED BIT(2)
#define MT_PACKET_ID_MASK GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
#define MT_PACKET_ID_FIRST 2
#define MT_TX_STATUS_SKB_TIMEOUT HZ
@ -421,6 +429,7 @@ struct mt76_dev {
struct mt76_queue q_tx[__MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@ -454,6 +463,8 @@ struct mt76_dev {
bool led_al;
u8 led_pin;
u8 csa_complete;
u32 rxfilter;
union {
@ -488,7 +499,7 @@ struct mt76_rx_status {
u8 rate_idx;
u8 nss;
u8 band;
u8 signal;
s8 signal;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
@ -677,6 +688,14 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
int mt76_get_min_avg_rssi(struct mt76_dev *dev);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);

View File

@ -88,6 +88,7 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
{ MT_TX_PROT_CFG6, 0xe3f42004 },
{ MT_TX_PROT_CFG7, 0xe3f42084 },
{ MT_TX_PROT_CFG8, 0xe3f42104 },
{ MT_VHT_HT_FBK_CFG1, 0xedcba980 },
};
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {

View File

@ -99,7 +99,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.wake_tx_queue = mt76_wake_tx_queue,
.get_survey = mt76_get_survey,
.get_txpower = mt76x02_get_txpower,
.get_txpower = mt76_get_txpower,
.flush = mt76x0e_flush,
.set_tim = mt76x0e_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
@ -141,6 +141,15 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
mt76_wr(dev, MT_CH_TIME_CFG,
MT_CH_TIME_CFG_TIMER_EN |
MT_CH_TIME_CFG_TX_AS_BUSY |
MT_CH_TIME_CFG_RX_AS_BUSY |
MT_CH_TIME_CFG_NAV_AS_BUSY |
MT_CH_TIME_CFG_EIFS_AS_BUSY |
MT_CH_CCA_RC_EN |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
err = mt76x0_register_device(dev);
if (err < 0)
return err;

View File

@ -1013,6 +1013,8 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
mt76x02_edcca_init(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
@ -1075,7 +1077,9 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));

View File

@ -155,7 +155,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.set_rts_threshold = mt76x02_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
.get_txpower = mt76x02_get_txpower,
.get_txpower = mt76_get_txpower,
};
static int mt76x0u_register_device(struct mt76x02_dev *dev)

View File

@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include "mt76x0.h"
#include "mcu.h"

View File

@ -15,8 +15,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __MT76X02_UTIL_H
#define __MT76X02_UTIL_H
#ifndef __MT76x02_H
#define __MT76x02_H
#include <linux/kfifo.h>
@ -28,6 +28,9 @@
#define MT_CALIBRATE_INTERVAL HZ
#define MT_WATCHDOG_TIME (HZ / 10)
#define MT_TX_HANG_TH 10
#define MT_MAX_CHAINS 2
struct mt76x02_rx_freq_cal {
s8 high_gain[MT_MAX_CHAINS];
@ -79,6 +82,7 @@ struct mt76x02_dev {
struct tasklet_struct pre_tbtt_tasklet;
struct delayed_work cal_work;
struct delayed_work mac_work;
struct delayed_work wdt_work;
u32 aggr_stats[32];
@ -89,6 +93,9 @@ struct mt76x02_dev {
u8 tbtt_count;
u16 beacon_int;
u32 tx_hang_reset;
u8 tx_hang_check;
struct mt76x02_calibration cal;
s8 target_power;
@ -101,6 +108,12 @@ struct mt76x02_dev {
u8 slottime;
struct mt76x02_dfs_pattern_detector dfs_pd;
/* edcca monitor */
bool ed_tx_blocked;
bool ed_monitor;
u8 ed_trigger;
u8 ed_silent;
};
extern struct ieee80211_rate mt76x02_rates[12];
@ -136,6 +149,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
s8 max_txpwr_adj);
void mt76x02_wdt_work(struct work_struct *work);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
@ -158,8 +172,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int mt76x02_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int *dbm);
void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@ -224,4 +236,4 @@ mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
return &sta->vif->group_wcid;
}
#endif
#endif /* __MT76x02_H */

Some files were not shown because too many files have changed in this diff.