Merge tag 'iwlwifi-next-for-kalle-2016-05-10' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* work for RX multiqueue continues (Sara);
* dynamic queue allocation work continues (Liad);
* add Luca as maintainer;
* a bunch of fixes and improvements all over;
commit 2befc4e003
@@ -5831,6 +5831,7 @@ F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg <johannes.berg@intel.com>
M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Luca Coelho <luciano.coelho@intel.com>
M: Intel Linux Wireless <linuxwifi@intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org
@@ -134,12 +134,6 @@ config IWLWIFI_DEBUGFS
	  is a low-impact option that allows getting insight into the
	  driver's state at runtime.

config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
	bool "Experimental uCode support"
	depends on IWLWIFI_DEBUG
	---help---
	  Enable use of experimental ucode for testing and debugging.

config IWLWIFI_DEVICE_TRACING
	bool "iwlwifi device access tracing"
	depends on EVENT_TRACING
@@ -52,7 +52,7 @@
static const struct iwl_base_params iwl1000_base_params = {
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.pll_cfg = true,
	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
	.shadow_ram_support = false,
	.led_compensation = 51,
@@ -62,7 +62,6 @@
static const struct iwl_base_params iwl2000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
	.shadow_ram_support = true,
	.led_compensation = 51,

@@ -76,7 +75,6 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
	.shadow_ram_support = true,
	.led_compensation = 57,
@@ -53,7 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
	.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.pll_cfg = true,
	.led_compensation = 51,
	.wd_timeout = IWL_WATCHDOG_DISABLED,
	.max_event_log_size = 512,
@@ -71,7 +71,6 @@
static const struct iwl_base_params iwl6000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
	.shadow_ram_support = true,
	.led_compensation = 51,

@@ -84,7 +83,6 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
	.shadow_ram_support = true,
	.led_compensation = 51,

@@ -97,7 +95,6 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
	.shadow_ram_support = true,
	.led_compensation = 57,
@@ -122,7 +122,6 @@
static const struct iwl_base_params iwl7000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
	.num_of_queues = 31,
	.pll_cfg_val = 0,
	.shadow_ram_support = true,
	.led_compensation = 57,
	.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -89,7 +89,7 @@
#define IWL8260_SMEM_OFFSET 0x400000
#define IWL8260_SMEM_LEN 0x68000

#define IWL8000_FW_PRE "iwlwifi-8000"
#define IWL8000_FW_PRE "iwlwifi-8000C-"
#define IWL8000_MODULE_FIRMWARE(api) \
	IWL8000_FW_PRE "-" __stringify(api) ".ucode"

@@ -112,7 +112,6 @@
static const struct iwl_base_params iwl8000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
	.num_of_queues = 31,
	.pll_cfg_val = 0,
	.shadow_ram_support = true,
	.led_compensation = 57,
	.wd_timeout = IWL_LONG_WD_TIMEOUT,

@@ -237,6 +236,20 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};

const struct iwl_cfg iwl8265_2ac_sdio_cfg = {
	.name = "Intel(R) Dual Band Wireless-AC 8265",
	.fw_name_pre = IWL8265_FW_PRE,
	IWL_DEVICE_8265,
	.ht_params = &iwl8000_ht_params,
	.nvm_ver = IWL8000_NVM_VERSION,
	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
	.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
	.disable_dummy_notification = true,
	.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};

const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
	.name = "Intel(R) Dual Band Wireless-AC 4165",
	.fw_name_pre = IWL8000_FW_PRE,
@@ -72,16 +72,21 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000

#define IWL9000_FW_PRE "iwlwifi-9000-"
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
#define IWL9000_MODULE_FIRMWARE(api) \
	IWL9000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260_MODULE_FIRMWARE(api) \
	IWL9260_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260LC_MODULE_FIRMWARE(api) \
	IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"

#define NVM_HW_SECTION_NUM_FAMILY_9000 10

static const struct iwl_base_params iwl9000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
	.num_of_queues = 31,
	.pll_cfg_val = 0,
	.shadow_ram_support = true,
	.led_compensation = 57,
	.wd_timeout = IWL_LONG_WD_TIMEOUT,

@@ -138,11 +143,26 @@ static const struct iwl_tt_params iwl9000_tt_params = {
	.apmg_not_supported = true, \
	.mq_rx_supported = true, \
	.vht_mu_mimo_supported = true, \
	.mac_addr_from_csr = true
	.mac_addr_from_csr = true, \
	.rf_id = true

const struct iwl_cfg iwl9560_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9560",
	.fw_name_pre = IWL9000_FW_PRE,
const struct iwl_cfg iwl9260_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9260",
	.fw_name_pre = IWL9260_FW_PRE,
	IWL_DEVICE_9000,
	.ht_params = &iwl9000_ht_params,
	.nvm_ver = IWL9000_NVM_VERSION,
	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};

/*
 * TODO the struct below is for internal testing only this should be
 * removed by EO 2016~
 */
const struct iwl_cfg iwl9260lc_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9260",
	.fw_name_pre = IWL9260LC_FW_PRE,
	IWL_DEVICE_9000,
	.ht_params = &iwl9000_ht_params,
	.nvm_ver = IWL9000_NVM_VERSION,

@@ -161,3 +181,5 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
};

MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
@@ -6,6 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright (C) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -31,6 +32,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright (C) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -165,20 +167,22 @@ static inline u8 num_of_ant(u8 mask)
 * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
 */
struct iwl_base_params {
	int eeprom_size;
	int num_of_queues; /* def: HW dependent */
	/* for iwl_pcie_apm_init() */
	u32 pll_cfg_val;

	const u16 max_ll_items;
	const bool shadow_ram_support;
	u16 led_compensation;
	unsigned int wd_timeout;
	u32 max_event_log_size;
	const bool shadow_reg_enable;
	const bool pcie_l1_allowed;
	const bool apmg_wake_up_wa;
	const bool scd_chain_ext_wa;

	u16 eeprom_size;
	u16 max_event_log_size;

	u8 pll_cfg:1, /* for iwl_pcie_apm_init() */
	   shadow_ram_support:1,
	   shadow_reg_enable:1,
	   pcie_l1_allowed:1,
	   apmg_wake_up_wa:1,
	   scd_chain_ext_wa:1;

	u8 num_of_queues; /* def: HW dependent */

	u8 max_ll_items;
	u8 led_compensation;
};

/*
@@ -189,10 +193,10 @@ struct iwl_base_params {
 */
struct iwl_ht_params {
	enum ieee80211_smps_mode smps_mode;
	const bool ht_greenfield_support; /* if used set to true */
	const bool stbc;
	const bool ldpc;
	bool use_rts_for_aggregation;
	u8 ht_greenfield_support:1,
	   stbc:1,
	   ldpc:1,
	   use_rts_for_aggregation:1;
	u8 ht40_bands;
};

@@ -233,10 +237,10 @@ struct iwl_tt_params {
	u32 tx_protection_entry;
	u32 tx_protection_exit;
	struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
	bool support_ct_kill;
	bool support_dynamic_smps;
	bool support_tx_protection;
	bool support_tx_backoff;
	u8 support_ct_kill:1,
	   support_dynamic_smps:1,
	   support_tx_protection:1,
	   support_tx_backoff:1;
};

/*
@@ -314,6 +318,7 @@ struct iwl_pwr_tx_backoff {
 * @smem_len: the length of SMEM
 * @mq_rx_supported: multi-queue rx support
 * @vht_mu_mimo_supported: VHT MU-MIMO support
 * @rf_id: need to read rf_id to determine the firmware image
 *
 * We enable the driver to be backward compatible wrt. hardware features.
 * API differences in uCode shouldn't be handled here but through TLVs
@@ -323,50 +328,51 @@ struct iwl_cfg {
	/* params specific to an individual device within a device family */
	const char *name;
	const char *fw_name_pre;
	const unsigned int ucode_api_max;
	const unsigned int ucode_api_min;
	const enum iwl_device_family device_family;
	const u32 max_data_size;
	const u32 max_inst_size;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	u8 non_shared_ant;
	bool bt_shared_single_ant;
	u16 nvm_ver;
	u16 nvm_calib_ver;
	/* params not likely to change within a device family */
	const struct iwl_base_params *base_params;
	/* params likely to change within a device family */
	const struct iwl_ht_params *ht_params;
	const struct iwl_eeprom_params *eeprom_params;
	enum iwl_led_mode led_mode;
	const bool rx_with_siso_diversity;
	const bool internal_wimax_coex;
	const bool host_interrupt_operation_mode;
	bool high_temp;
	u8 nvm_hw_section_num;
	bool mac_addr_from_csr;
	bool lp_xtal_workaround;
	const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
	bool no_power_up_nic_in_init;
	const char *default_nvm_file_B_step;
	const char *default_nvm_file_C_step;
	netdev_features_t features;
	unsigned int max_rx_agg_size;
	bool disable_dummy_notification;
	unsigned int max_tx_agg_size;
	unsigned int max_ht_ampdu_exponent;
	unsigned int max_vht_ampdu_exponent;
	const u32 dccm_offset;
	const u32 dccm_len;
	const u32 dccm2_offset;
	const u32 dccm2_len;
	const u32 smem_offset;
	const u32 smem_len;
	const struct iwl_tt_params *thermal_params;
	bool apmg_not_supported;
	bool mq_rx_supported;
	bool vht_mu_mimo_supported;
	enum iwl_device_family device_family;
	enum iwl_led_mode led_mode;
	u32 max_data_size;
	u32 max_inst_size;
	netdev_features_t features;
	u32 dccm_offset;
	u32 dccm_len;
	u32 dccm2_offset;
	u32 dccm2_len;
	u32 smem_offset;
	u32 smem_len;
	u16 nvm_ver;
	u16 nvm_calib_ver;
	u16 rx_with_siso_diversity:1,
	    bt_shared_single_ant:1,
	    internal_wimax_coex:1,
	    host_interrupt_operation_mode:1,
	    high_temp:1,
	    mac_addr_from_csr:1,
	    lp_xtal_workaround:1,
	    no_power_up_nic_in_init:1,
	    disable_dummy_notification:1,
	    apmg_not_supported:1,
	    mq_rx_supported:1,
	    vht_mu_mimo_supported:1,
	    rf_id:1;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	u8 non_shared_ant;
	u8 nvm_hw_section_num;
	u8 max_rx_agg_size;
	u8 max_tx_agg_size;
	u8 max_ht_ampdu_exponent;
	u8 max_vht_ampdu_exponent;
	u8 ucode_api_max;
	u8 ucode_api_min;
};

/*
@@ -437,8 +443,10 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9260lc_2ac_cfg;
extern const struct iwl_cfg iwl5165_2ac_cfg;
#endif /* CONFIG_IWLMVM */
@@ -107,6 +107,17 @@
 */
#define CSR_HW_REV (CSR_BASE+0x028)

/*
 * RF ID revision info
 * Bit fields:
 * 31:24: Reserved (set to 0x0)
 * 23:12: Type
 * 11:8: Step (A - 0x0, B - 0x1, etc)
 * 7:4: Dash
 * 3:0: Flavor
 */
#define CSR_HW_RF_ID (CSR_BASE+0x09c)

/*
 * EEPROM and OTP (one-time-programmable) memory reads
 *

@@ -333,6 +344,10 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)

/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
#define CSR_HW_RF_ID_TYPE_LC (0x00101000)

/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -117,7 +117,7 @@ struct iwl_drv {
	const struct iwl_cfg *cfg;

	int fw_index; /* firmware we're trying to load */
	char firmware_name[32]; /* name of firmware file to load */
	char firmware_name[64]; /* name of firmware file to load */

	struct completion request_firmware_complete;

@@ -211,20 +211,12 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
static void iwl_req_fw_callback(const struct firmware *ucode_raw,
				void *context);

#define UCODE_EXPERIMENTAL_INDEX 100
#define UCODE_EXPERIMENTAL_TAG "exp"

static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
	const char *name_pre = drv->cfg->fw_name_pre;
	char tag[8];

	if (first) {
#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
		drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
		strcpy(tag, UCODE_EXPERIMENTAL_TAG);
	} else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
		drv->fw_index = drv->cfg->ucode_api_max;
		sprintf(tag, "%d", drv->fw_index);
	} else {

@@ -240,22 +232,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
	snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
		 name_pre, tag);

	/*
	 * Starting 8000B - FW name format has changed. This overwrites the
	 * previous name and uses the new format.
	 */
	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);

		if (rev_step != 'A')
			snprintf(drv->firmware_name,
				 sizeof(drv->firmware_name), "%s%c-%s.ucode",
				 name_pre, rev_step, tag);
	}

	IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
		       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
		       ? "EXPERIMENTAL " : "",
	IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
		       drv->firmware_name);

	return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,

@@ -554,9 +531,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
	}

	if (build)
		sprintf(buildstr, " build %u%s", build,
			(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
			? " (EXP)" : "");
		sprintf(buildstr, " build %u", build);
	else
		buildstr[0] = '\0';

@@ -640,9 +615,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
	build = le32_to_cpu(ucode->build);

	if (build)
		sprintf(buildstr, " build %u%s", build,
			(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
			? " (EXP)" : "");
		sprintf(buildstr, " build %u", build);
	else
		buildstr[0] = '\0';

@@ -1290,15 +1263,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	/* no api version check required for experimental uCode */
	if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
		if (api_ver < api_min || api_ver > api_max) {
			IWL_ERR(drv,
				"Driver unable to support your firmware API. "
				"Driver supports v%u, firmware is v%u.\n",
				api_max, api_ver);
			goto try_again;
		}
	if (api_ver < api_min || api_ver > api_max) {
		IWL_ERR(drv,
			"Driver unable to support your firmware API. "
			"Driver supports v%u, firmware is v%u.\n",
			api_max, api_ver);
		goto try_again;
	}

	/*

@@ -1757,4 +1727,4 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");

module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
		   S_IRUGO);
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
@@ -98,6 +98,7 @@ struct iwl_nvm_data {
	s8 max_tx_pwr_half_dbm;

	bool lar_enabled;
	bool vht160_supported;
	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
	struct ieee80211_channel channels[];
};
@@ -321,6 +321,9 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
/* Write index table */
#define RFH_Q0_FRBDCB_WIDX 0xA08080
#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
/* Write index table - shadow registers */
#define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
/* Read index table */
#define RFH_Q0_FRBDCB_RIDX 0xA080C0
#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
@@ -288,6 +288,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
		    !data->sku_cap_band_52GHz_enable)
			continue;

		if (ch_flags & NVM_CHANNEL_160MHZ)
			data->vht160_supported = true;

		if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
			/*
			 * Channels might become valid later if lar is

@@ -331,17 +334,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
		channel->flags = 0;

		IWL_DEBUG_EEPROM(dev,
				 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
				 "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
				 channel->hw_value,
				 is_5ghz ? "5.2" : "2.4",
				 ch_flags,
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(INDOOR_ONLY),
				 CHECK_AND_PRINT_I(GO_CONCURRENT),
				 ch_flags,
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(40MHZ),
				 CHECK_AND_PRINT_I(80MHZ),
				 CHECK_AND_PRINT_I(160MHZ),
				 channel->max_power,
				 ((ch_flags & NVM_CHANNEL_IBSS) &&
				  !(ch_flags & NVM_CHANNEL_RADAR))

@@ -370,6 +376,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
			max_ampdu_exponent <<
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	if (data->vht160_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
				IEEE80211_VHT_CAP_SHORT_GI_160;

	if (cfg->vht_mu_mimo_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
@ -6,6 +6,7 @@
|
|||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
|
@ -72,8 +73,6 @@
|
|||
#include "iwl-trans.h"
|
||||
|
||||
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
|
||||
#define IWL_NUM_PAPD_CH_GROUPS 9
|
||||
#define IWL_NUM_TXP_CH_GROUPS 9
|
||||
|
||||
struct iwl_phy_db_entry {
|
||||
u16 size;
|
||||
|
@ -86,14 +85,18 @@ struct iwl_phy_db_entry {
|
|||
* @cfg: phy configuration.
|
||||
* @calib_nch: non channel specific calibration data.
|
||||
* @calib_ch: channel specific calibration data.
|
||||
* @n_group_papd: number of entries in papd channel group.
|
||||
* @calib_ch_group_papd: calibration data related to papd channel group.
|
||||
* @n_group_txp: number of entries in tx power channel group.
|
||||
* @calib_ch_group_txp: calibration data related to tx power chanel group.
|
||||
*/
|
||||
struct iwl_phy_db {
|
||||
struct iwl_phy_db_entry cfg;
|
||||
struct iwl_phy_db_entry calib_nch;
|
||||
struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
|
||||
struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
|
||||
int n_group_papd;
|
||||
struct iwl_phy_db_entry *calib_ch_group_papd;
|
||||
int n_group_txp;
|
||||
struct iwl_phy_db_entry *calib_ch_group_txp;
|
||||
|
||||
struct iwl_trans *trans;
|
||||
};
|
||||
|
@ -143,6 +146,9 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
|
|||
|
||||
phy_db->trans = trans;
|
||||
|
||||
phy_db->n_group_txp = -1;
|
||||
phy_db->n_group_papd = -1;
|
||||
|
||||
/* TODO: add default values of the phy db. */
|
||||
return phy_db;
|
||||
}
|
||||
|
@ -166,11 +172,11 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
|
|||
case IWL_PHY_DB_CALIB_NCH:
|
||||
return &phy_db->calib_nch;
|
||||
case IWL_PHY_DB_CALIB_CHG_PAPD:
|
||||
if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
|
||||
if (chg_id >= phy_db->n_group_papd)
|
||||
return NULL;
|
||||
return &phy_db->calib_ch_group_papd[chg_id];
|
||||
case IWL_PHY_DB_CALIB_CHG_TXP:
|
||||
if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
|
||||
if (chg_id >= phy_db->n_group_txp)
|
||||
return NULL;
|
||||
return &phy_db->calib_ch_group_txp[chg_id];
|
||||
default:
|
||||
|
@ -202,17 +208,21 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
|
|||
|
||||
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
|
||||
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
|
||||
for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
|
||||
|
||||
for (i = 0; i < phy_db->n_group_papd; i++)
|
||||
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
|
||||
for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
|
||||
kfree(phy_db->calib_ch_group_papd);
|
||||
|
||||
for (i = 0; i < phy_db->n_group_txp; i++)
|
||||
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
|
||||
kfree(phy_db->calib_ch_group_txp);
|
||||
|
||||
kfree(phy_db);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_phy_db_free);
|
||||
|
||||
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
|
||||
gfp_t alloc_ctx)
|
||||
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
|
||||
struct iwl_rx_packet *pkt)
|
||||
{
|
||||
struct iwl_calib_res_notif_phy_db *phy_db_notif =
|
||||
(struct iwl_calib_res_notif_phy_db *)pkt->data;
|
||||
|
@ -224,16 +234,42 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
|
|||
if (!phy_db)
|
||||
return -EINVAL;
|
||||
|
||||
if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
|
||||
type == IWL_PHY_DB_CALIB_CHG_TXP)
|
||||
if (type == IWL_PHY_DB_CALIB_CHG_PAPD) {
|
||||
chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
|
||||
if (phy_db && !phy_db->calib_ch_group_papd) {
|
||||
/*
|
||||
* Firmware sends the largest index first, so we can use
|
||||
* it to know how much we should allocate.
|
||||
*/
|
||||
phy_db->calib_ch_group_papd = kcalloc(chg_id + 1,
|
||||
sizeof(struct iwl_phy_db_entry),
|
||||
GFP_ATOMIC);
|
||||
if (!phy_db->calib_ch_group_papd)
|
||||
return -ENOMEM;
|
||||
phy_db->n_group_papd = chg_id + 1;
|
||||
}
|
||||
} else if (type == IWL_PHY_DB_CALIB_CHG_TXP) {
|
||||
chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
|
||||
if (phy_db && !phy_db->calib_ch_group_txp) {
|
||||
/*
|
||||
* Firmware sends the largest index first, so we can use
|
||||
* it to know how much we should allocate.
|
||||
*/
|
||||
phy_db->calib_ch_group_txp = kcalloc(chg_id + 1,
|
||||
sizeof(struct iwl_phy_db_entry),
|
||||
GFP_ATOMIC);
|
||||
if (!phy_db->calib_ch_group_txp)
|
||||
return -ENOMEM;
|
||||
phy_db->n_group_txp = chg_id + 1;
|
||||
}
|
||||
}
|
||||
|
||||
entry = iwl_phy_db_get_section(phy_db, type, chg_id);
|
||||
if (!entry)
|
||||
return -EINVAL;
|
||||
|
||||
kfree(entry->data);
|
||||
entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
|
||||
entry->data = kmemdup(phy_db_notif->data, size, GFP_ATOMIC);
|
||||
if (!entry->data) {
|
||||
entry->size = 0;
|
||||
return -ENOMEM;
|
||||
|
@ -296,7 +332,7 @@ static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
|
|||
if (ch_index == 0xff)
|
||||
return 0xff;
|
||||
|
||||
for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
|
||||
for (i = 0; i < phy_db->n_group_txp; i++) {
|
||||
txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
|
||||
if (!txp_chg)
|
||||
return 0xff;
|
||||
|
@ -447,7 +483,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
|
|||
/* Send all the TXP channel specific data */
|
||||
err = iwl_phy_db_send_all_channel_groups(phy_db,
|
||||
IWL_PHY_DB_CALIB_CHG_PAPD,
|
||||
IWL_NUM_PAPD_CH_GROUPS);
|
||||
phy_db->n_group_papd);
|
||||
if (err) {
|
||||
IWL_ERR(phy_db->trans,
|
||||
"Cannot send channel specific PAPD groups\n");
|
||||
|
@ -457,7 +493,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
|
|||
/* Send all the TXP channel specific data */
|
||||
err = iwl_phy_db_send_all_channel_groups(phy_db,
|
||||
IWL_PHY_DB_CALIB_CHG_TXP,
|
||||
IWL_NUM_TXP_CH_GROUPS);
|
||||
phy_db->n_group_txp);
|
||||
if (err) {
|
||||
IWL_ERR(phy_db->trans,
|
||||
"Cannot send channel specific TX power groups\n");
|
||||
|
|
|
@ -73,8 +73,8 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
|
|||
|
||||
void iwl_phy_db_free(struct iwl_phy_db *phy_db);
|
||||
|
||||
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
|
||||
gfp_t alloc_ctx);
|
||||
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
|
||||
struct iwl_rx_packet *pkt);
|
||||
|
||||
|
||||
int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
|
||||
|
|
|
@ -753,6 +753,7 @@ enum iwl_plat_pm_mode {
|
|||
* @dev - pointer to struct device * that represents the device
|
||||
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
|
||||
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
|
||||
* @hw_rf_id a u32 with the device RF ID
|
||||
* @hw_id: a u32 with the ID of the device / sub-device.
|
||||
* Set during transport allocation.
|
||||
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
|
||||
|
@ -797,6 +798,7 @@ struct iwl_trans {
|
|||
struct device *dev;
|
||||
u32 max_skb_frags;
|
||||
u32 hw_rev;
|
||||
u32 hw_rf_id;
|
||||
u32 hw_id;
|
||||
char hw_id_str[52];
|
||||
|
||||
|
|
|
@ -109,6 +109,7 @@
|
|||
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
|
||||
#define IWL_MVM_TOF_IS_RESPONDER 0
|
||||
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
|
||||
#define IWL_MVM_HW_CSUM_DISABLE 0
|
||||
#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
|
||||
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
|
||||
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
|
||||
|
|
|
@ -1804,7 +1804,6 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
|
|||
struct iwl_wowlan_status *fw_status;
|
||||
int i;
|
||||
bool keep;
|
||||
struct ieee80211_sta *ap_sta;
|
||||
struct iwl_mvm_sta *mvm_ap_sta;
|
||||
|
||||
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
|
||||
|
@ -1823,13 +1822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
|
|||
status.wake_packet = fw_status->wake_packet;
|
||||
|
||||
/* still at hard-coded place 0 for D3 image */
|
||||
ap_sta = rcu_dereference_protected(
|
||||
mvm->fw_id_to_mac_id[0],
|
||||
lockdep_is_held(&mvm->mutex));
|
||||
if (IS_ERR_OR_NULL(ap_sta))
|
||||
mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
|
||||
if (!mvm_ap_sta)
|
||||
goto out_free;
|
||||
|
||||
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
|
||||
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
|
||||
u16 seq = status.qos_seq_ctr[i];
|
||||
/* firmware stores last-used value, we store next value */
|
||||
|
|
|
@ -281,13 +281,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
|
|||
|
||||
if (vif->type == NL80211_IFTYPE_STATION &&
|
||||
ap_sta_id != IWL_MVM_STATION_COUNT) {
|
||||
struct ieee80211_sta *sta;
|
||||
|
||||
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
|
||||
lockdep_is_held(&mvm->mutex));
|
||||
if (!IS_ERR_OR_NULL(sta)) {
|
||||
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_sta *mvm_sta;
|
||||
|
||||
mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
|
||||
if (mvm_sta) {
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"ap_sta_id %d - reduced Tx power %d\n",
|
||||
ap_sta_id,
|
||||
|
|
|
@ -1309,6 +1309,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
|
|||
PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
|
||||
PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
|
||||
PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
|
||||
PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
|
||||
PRINT_MVM_REF(IWL_MVM_REF_RX);
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
}
|
||||
|
|
|
@ -368,7 +368,7 @@ struct iwl_wowlan_gtk_status {
|
|||
u8 decrypt_key[16];
|
||||
u8 tkip_mic_key[8];
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
|
||||
} __packed;
|
||||
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
|
||||
|
||||
struct iwl_wowlan_status {
|
||||
struct iwl_wowlan_gtk_status gtk;
|
||||
|
|
|
@ -437,21 +437,28 @@ struct iwl_rxq_sync_notification {
|
|||
/**
|
||||
* Internal message identifier
|
||||
*
|
||||
* @IWL_MVM_RXQ_EMPTY: empty sync notification
|
||||
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
|
||||
*/
|
||||
enum iwl_mvm_rxq_notif_type {
|
||||
IWL_MVM_RXQ_EMPTY,
|
||||
IWL_MVM_RXQ_NOTIF_DEL_BA,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
|
||||
* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
|
||||
* FW is agnostic to the payload, so there are no endianity requirements.
|
||||
*
|
||||
* @type: value from &iwl_mvm_rxq_notif_type
|
||||
* @sync: ctrl path is waiting for all notifications to be received
|
||||
* @cookie: internal cookie to identify old notifications
|
||||
* @data: payload
|
||||
*/
|
||||
struct iwl_mvm_internal_rxq_notif {
|
||||
u32 type;
|
||||
u16 type;
|
||||
u16 sync;
|
||||
u32 cookie;
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
|
|
|
@ -173,7 +173,7 @@ enum iwl_sta_key_flag {
|
|||
|
||||
/**
|
||||
* enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
|
||||
* @STA_MODIFY_KEY: this command modifies %key
|
||||
* @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
|
||||
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
|
||||
* @STA_MODIFY_TX_RATE: unused
|
||||
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
|
||||
|
@ -183,7 +183,7 @@ enum iwl_sta_key_flag {
|
|||
* @STA_MODIFY_QUEUES: modify the queues used by this station
|
||||
*/
|
||||
enum iwl_sta_modify_flag {
|
||||
STA_MODIFY_KEY = BIT(0),
|
||||
STA_MODIFY_QUEUE_REMOVAL = BIT(0),
|
||||
STA_MODIFY_TID_DISABLE_TX = BIT(1),
|
||||
STA_MODIFY_TX_RATE = BIT(2),
|
||||
STA_MODIFY_ADD_BA_TID = BIT(3),
|
||||
|
@ -255,8 +255,10 @@ struct iwl_mvm_keyinfo {
|
|||
__le64 hw_tkip_mic_tx_key;
|
||||
} __packed;
|
||||
|
||||
#define IWL_ADD_STA_STATUS_MASK 0xFF
|
||||
#define IWL_ADD_STA_BAID_MASK 0xFF00
|
||||
#define IWL_ADD_STA_STATUS_MASK 0xFF
|
||||
#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
|
||||
#define IWL_ADD_STA_BAID_MASK 0x7F00
|
||||
#define IWL_ADD_STA_BAID_SHIFT 8
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
|
||||
|
|
|
@ -90,6 +90,7 @@ enum {
|
|||
* DQA queue numbers
|
||||
*
|
||||
* @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
|
||||
* @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
|
||||
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
|
||||
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
|
||||
* that we are never left without the possibility to connect to an AP.
|
||||
|
@ -97,6 +98,8 @@ enum {
|
|||
* Each MGMT queue is mapped to a single STA
|
||||
* MGMT frames are frames that return true on ieee80211_is_mgmt()
|
||||
* @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
|
||||
* @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
|
||||
* responses
|
||||
* @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
|
||||
* DATA frames are intended for !ieee80211_is_mgmt() frames, but if
|
||||
* the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
|
||||
|
@ -105,10 +108,12 @@ enum {
|
|||
*/
|
||||
enum iwl_mvm_dqa_txq {
|
||||
IWL_MVM_DQA_CMD_QUEUE = 0,
|
||||
IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
|
||||
IWL_MVM_DQA_GCAST_QUEUE = 3,
|
||||
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
|
||||
IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
|
||||
IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
|
||||
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
|
||||
IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
|
||||
IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
|
||||
};
|
||||
|
|
|
@ -271,9 +271,6 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
|
|||
for (i = 0;
|
||||
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
|
||||
i++) {
|
||||
/* Mark the number of TXF we're pulling now */
|
||||
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
|
||||
|
||||
fifo_hdr = (void *)(*dump_data)->data;
|
||||
fifo_data = (void *)fifo_hdr->data;
|
||||
fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
|
||||
|
@ -289,6 +286,10 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
|
|||
cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
|
||||
|
||||
fifo_hdr->fifo_num = cpu_to_le32(i);
|
||||
|
||||
/* Mark the number of TXF we're pulling now */
|
||||
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
|
||||
|
||||
fifo_hdr->available_bytes =
|
||||
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
|
||||
TXF_CPU2_FIFO_ITEM_CNT));
|
||||
|
@ -339,9 +340,11 @@ void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
|
|||
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
|
||||
#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
|
||||
|
||||
static const struct {
|
||||
struct iwl_prph_range {
|
||||
u32 start, end;
|
||||
} iwl_prph_dump_addr[] = {
|
||||
};
|
||||
|
||||
static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
|
||||
{ .start = 0x00a00000, .end = 0x00a00000 },
|
||||
{ .start = 0x00a0000c, .end = 0x00a00024 },
|
||||
{ .start = 0x00a0002c, .end = 0x00a0003c },
|
||||
|
@ -439,8 +442,18 @@ static const struct {
|
|||
{ .start = 0x00a44000, .end = 0x00a7bf80 },
|
||||
};
|
||||
|
||||
static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
|
||||
{ .start = 0x00a05c00, .end = 0x00a05c18 },
|
||||
{ .start = 0x00a05400, .end = 0x00a056e8 },
|
||||
{ .start = 0x00a08000, .end = 0x00a098bc },
|
||||
{ .start = 0x00adfc00, .end = 0x00adfd1c },
|
||||
{ .start = 0x00a02400, .end = 0x00a02758 },
|
||||
};
|
||||
|
||||
static u32 iwl_dump_prph(struct iwl_trans *trans,
|
||||
struct iwl_fw_error_dump_data **data)
|
||||
struct iwl_fw_error_dump_data **data,
|
||||
const struct iwl_prph_range *iwl_prph_dump_addr,
|
||||
u32 range_len)
|
||||
{
|
||||
struct iwl_fw_error_dump_prph *prph;
|
||||
unsigned long flags;
|
||||
|
@ -449,7 +462,7 @@ static u32 iwl_dump_prph(struct iwl_trans *trans,
|
|||
if (!iwl_trans_grab_nic_access(trans, &flags))
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
|
||||
for (i = 0; i < range_len; i++) {
|
||||
/* The range includes both boundaries */
|
||||
int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
|
||||
iwl_prph_dump_addr[i].start + 4;
|
||||
|
@ -572,16 +585,31 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
|
|||
}
|
||||
|
||||
/* Make room for PRPH registers */
|
||||
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
|
||||
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) {
|
||||
/* The range includes both boundaries */
|
||||
int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
|
||||
iwl_prph_dump_addr[i].start + 4;
|
||||
int num_bytes_in_chunk =
|
||||
iwl_prph_dump_addr_comm[i].end -
|
||||
iwl_prph_dump_addr_comm[i].start + 4;
|
||||
|
||||
prph_len += sizeof(*dump_data) +
|
||||
sizeof(struct iwl_fw_error_dump_prph) +
|
||||
num_bytes_in_chunk;
|
||||
}
|
||||
|
||||
if (mvm->cfg->mq_rx_supported) {
|
||||
for (i = 0; i <
|
||||
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
|
||||
/* The range includes both boundaries */
|
||||
int num_bytes_in_chunk =
|
||||
iwl_prph_dump_addr_9000[i].end -
|
||||
iwl_prph_dump_addr_9000[i].start + 4;
|
||||
|
||||
prph_len += sizeof(*dump_data) +
|
||||
sizeof(struct iwl_fw_error_dump_prph) +
|
||||
num_bytes_in_chunk;
|
||||
}
|
||||
}
|
||||
|
||||
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
|
||||
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
|
||||
}
|
||||
|
@ -609,7 +637,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
|
|||
}
|
||||
|
||||
/* Make room for fw's virtual image pages, if it exists */
|
||||
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
|
||||
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
|
||||
mvm->fw_paging_db[0].fw_paging_block)
|
||||
file_len += mvm->num_of_paging_blk *
|
||||
(sizeof(*dump_data) +
|
||||
sizeof(struct iwl_fw_error_dump_paging) +
|
||||
|
@ -750,7 +779,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
|
|||
}
|
||||
|
||||
/* Dump fw's virtual image */
|
||||
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
|
||||
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
|
||||
mvm->fw_paging_db[0].fw_paging_block) {
|
||||
for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
|
||||
struct iwl_fw_error_dump_paging *paging;
|
||||
struct page *pages =
|
||||
|
@ -767,8 +797,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
|
|||
}
|
||||
}
|
||||
|
||||
if (prph_len)
|
||||
iwl_dump_prph(mvm->trans, &dump_data);
|
||||
if (prph_len) {
|
||||
iwl_dump_prph(mvm->trans, &dump_data,
|
||||
iwl_prph_dump_addr_comm,
|
||||
ARRAY_SIZE(iwl_prph_dump_addr_comm));
|
||||
|
||||
if (mvm->cfg->mq_rx_supported)
|
||||
iwl_dump_prph(mvm->trans, &dump_data,
|
||||
iwl_prph_dump_addr_9000,
|
||||
ARRAY_SIZE(iwl_prph_dump_addr_9000));
|
||||
}
|
||||
|
||||
dump_trans_data:
|
||||
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
|
||||
|
|
|
@ -149,9 +149,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
|
|||
|
||||
__free_pages(mvm->fw_paging_db[i].fw_paging_block,
|
||||
get_order(mvm->fw_paging_db[i].fw_paging_size));
|
||||
mvm->fw_paging_db[i].fw_paging_block = NULL;
|
||||
}
|
||||
kfree(mvm->trans->paging_download_buf);
|
||||
mvm->trans->paging_download_buf = NULL;
|
||||
mvm->trans->paging_db = NULL;
|
||||
|
||||
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
|
||||
}
|
||||
|
@ -533,7 +535,7 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
|
|||
return true;
|
||||
}
|
||||
|
||||
WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
|
||||
WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
|
||||
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -501,9 +501,11 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
switch (vif->type) {
|
||||
case NL80211_IFTYPE_P2P_DEVICE:
|
||||
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
|
||||
IWL_MVM_OFFCHANNEL_QUEUE,
|
||||
IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
|
||||
if (!iwl_mvm_is_dqa_supported(mvm))
|
||||
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
|
||||
IWL_MVM_OFFCHANNEL_QUEUE,
|
||||
IWL_MVM_TX_FIFO_VO, 0,
|
||||
wdg_timeout);
|
||||
break;
|
||||
case NL80211_IFTYPE_AP:
|
||||
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
|
||||
|
@ -533,13 +535,21 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
switch (vif->type) {
|
||||
case NL80211_IFTYPE_P2P_DEVICE:
|
||||
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
|
||||
IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
|
||||
0);
|
||||
if (!iwl_mvm_is_dqa_supported(mvm))
|
||||
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
|
||||
IWL_MVM_OFFCHANNEL_QUEUE,
|
||||
IWL_MAX_TID_COUNT, 0);
|
||||
|
||||
break;
|
||||
case NL80211_IFTYPE_AP:
|
||||
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
|
||||
IWL_MAX_TID_COUNT, 0);
|
||||
|
||||
if (iwl_mvm_is_dqa_supported(mvm))
|
||||
iwl_mvm_disable_txq(mvm,
|
||||
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
|
||||
vif->hw_queue[0], IWL_MAX_TID_COUNT,
|
||||
0);
|
||||
/* fall through */
|
||||
default:
|
||||
/*
|
||||
|
|
|
@ -229,7 +229,11 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
|
|||
|
||||
IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
|
||||
spin_lock_bh(&mvm->refs_lock);
|
||||
WARN_ON(!mvm->refs[ref_type]--);
|
||||
if (WARN_ON(!mvm->refs[ref_type])) {
|
||||
spin_unlock_bh(&mvm->refs_lock);
|
||||
return;
|
||||
}
|
||||
mvm->refs[ref_type]--;
|
||||
spin_unlock_bh(&mvm->refs_lock);
|
||||
iwl_trans_unref(mvm->trans);
|
||||
}
|
||||
|
@ -439,11 +443,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
|||
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
|
||||
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
|
||||
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
|
||||
if (iwl_mvm_has_new_rx_api(mvm))
|
||||
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
|
||||
|
||||
if (mvm->trans->num_rx_queues > 1)
|
||||
ieee80211_hw_set(hw, USES_RSS);
|
||||
|
||||
if (mvm->trans->max_skb_frags)
|
||||
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
|
||||
|
||||
hw->queues = mvm->first_agg_queue;
|
||||
if (!iwl_mvm_is_dqa_supported(mvm))
|
||||
hw->queues = mvm->first_agg_queue;
|
||||
else
|
||||
hw->queues = IEEE80211_MAX_QUEUES;
|
||||
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
|
||||
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
|
||||
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
|
||||
|
@ -848,6 +860,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
|
|||
u16 *ssn = ¶ms->ssn;
|
||||
u8 buf_size = params->buf_size;
|
||||
bool amsdu = params->amsdu;
|
||||
u16 timeout = params->timeout;
|
||||
|
||||
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
|
||||
sta->addr, tid, action);
|
||||
|
@ -888,10 +901,12 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
|
|||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size);
|
||||
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
|
||||
timeout);
|
||||
break;
|
||||
case IEEE80211_AMPDU_RX_STOP:
|
||||
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size);
|
||||
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
|
||||
timeout);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_START:
|
||||
if (!iwl_enable_tx_ampdu(mvm->cfg)) {
|
||||
|
@ -4037,6 +4052,55 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
|
|||
}
|
||||
}
|
||||
|
||||
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_internal_rxq_notif *notif,
|
||||
u32 size)
|
||||
{
|
||||
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
|
||||
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (!iwl_mvm_has_new_rx_api(mvm))
|
||||
return;
|
||||
|
||||
notif->cookie = mvm->queue_sync_cookie;
|
||||
|
||||
if (notif->sync)
|
||||
atomic_set(&mvm->queue_sync_counter,
|
||||
mvm->trans->num_rx_queues);
|
||||
|
||||
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
|
||||
if (ret) {
|
||||
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (notif->sync)
|
||||
ret = wait_event_timeout(notif_waitq,
|
||||
atomic_read(&mvm->queue_sync_counter) == 0,
|
||||
HZ);
|
||||
WARN_ON_ONCE(!ret);
|
||||
|
||||
out:
|
||||
atomic_set(&mvm->queue_sync_counter, 0);
|
||||
mvm->queue_sync_cookie++;
|
||||
}
|
||||
|
||||
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
struct iwl_mvm_internal_rxq_notif data = {
|
||||
.type = IWL_MVM_RXQ_EMPTY,
|
||||
.sync = 1,
|
||||
};
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
|
||||
mutex_unlock(&mvm->mutex);
|
||||
}
|
||||
|
||||
const struct ieee80211_ops iwl_mvm_hw_ops = {
|
||||
.tx = iwl_mvm_mac_tx,
|
||||
.ampdu_action = iwl_mvm_mac_ampdu_action,
|
||||
|
@ -4093,6 +4157,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
|
|||
|
||||
.event_callback = iwl_mvm_mac_event_callback,
|
||||
|
||||
.sync_rx_queues = iwl_mvm_sync_rx_queues,
|
||||
|
||||
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
|
|
@ -301,6 +301,8 @@ enum iwl_mvm_ref_type {
|
|||
IWL_MVM_REF_PROTECT_CSA,
|
||||
IWL_MVM_REF_FW_DBG_COLLECT,
|
||||
IWL_MVM_REF_INIT_UCODE,
|
||||
IWL_MVM_REF_SENDING_CMD,
|
||||
IWL_MVM_REF_RX,
|
||||
|
||||
/* update debugfs.c when changing this */
|
||||
|
||||
|
@ -613,6 +615,84 @@ struct iwl_mvm_shared_mem_cfg {
|
|||
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
|
||||
* @head_sn: reorder window head sn
|
||||
* @num_stored: number of mpdus stored in the buffer
|
||||
* @buf_size: the reorder buffer size as set by the last addba request
|
||||
* @sta_id: sta id of this reorder buffer
|
||||
* @queue: queue of this reorder buffer
|
||||
* @last_amsdu: track last ASMDU SN for duplication detection
|
||||
* @last_sub_index: track ASMDU sub frame index for duplication detection
|
||||
* @entries: list of skbs stored
|
||||
* @reorder_time: time the packet was stored in the reorder buffer
|
||||
* @reorder_timer: timer for frames are in the reorder buffer. For AMSDU
|
||||
* it is the time of last received sub-frame
|
||||
* @removed: prevent timer re-arming
|
||||
* @lock: protect reorder buffer internal state
|
||||
* @mvm: mvm pointer, needed for frame timer context
|
||||
*/
|
||||
struct iwl_mvm_reorder_buffer {
|
||||
u16 head_sn;
|
||||
u16 num_stored;
|
||||
u8 buf_size;
|
||||
u8 sta_id;
|
||||
int queue;
|
||||
u16 last_amsdu;
|
||||
u8 last_sub_index;
|
||||
struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF];
|
||||
unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
|
||||
struct timer_list reorder_timer;
|
||||
bool removed;
|
||||
spinlock_t lock;
|
||||
struct iwl_mvm *mvm;
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_baid_data - BA session data
|
||||
* @sta_id: station id
|
||||
* @tid: tid of the session
|
||||
* @baid baid of the session
|
||||
* @timeout: the timeout set in the addba request
|
||||
* @last_rx: last rx jiffies, updated only if timeout passed from last update
|
||||
* @session_timer: timer to check if BA session expired, runs at 2 * timeout
|
||||
* @mvm: mvm pointer, needed for timer context
|
||||
* @reorder_buf: reorder buffer, allocated per queue
|
||||
*/
|
||||
struct iwl_mvm_baid_data {
|
||||
struct rcu_head rcu_head;
|
||||
u8 sta_id;
|
||||
u8 tid;
|
||||
u8 baid;
|
||||
u16 timeout;
|
||||
unsigned long last_rx;
|
||||
struct timer_list session_timer;
|
||||
struct iwl_mvm *mvm;
|
||||
struct iwl_mvm_reorder_buffer reorder_buf[];
|
||||
};
|
||||
|
||||
/*
|
||||
* enum iwl_mvm_queue_status - queue status
|
||||
* @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
|
||||
* Basically, this means that this queue can be used for any purpose
|
||||
* @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
|
||||
* This is the state of a queue that has been dedicated for some RATID
|
||||
* (agg'd or not), but that hasn't yet gone through the actual enablement
|
||||
* of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
|
||||
* Note that in this state there is no requirement to already know what TID
|
||||
* should be used with this queue, it is just marked as a queue that will
|
||||
* be used, and shouldn't be allocated to anyone else.
|
||||
* @IWL_MVM_QUEUE_READY: queue is ready to be used
|
||||
* This is the state of a queue that has been fully configured (including
|
||||
* SCD pointers, etc), has a specific RA/TID assigned to it, and can be
|
||||
* used to send traffic.
|
||||
*/
|
||||
enum iwl_mvm_queue_status {
|
||||
IWL_MVM_QUEUE_FREE,
|
||||
IWL_MVM_QUEUE_RESERVED,
|
||||
IWL_MVM_QUEUE_READY,
|
||||
};
|
||||
|
||||
struct iwl_mvm {
|
||||
/* for logger access */
|
||||
struct device *dev;
|
||||
|
@ -633,6 +713,8 @@ struct iwl_mvm {
|
|||
|
||||
unsigned long status;
|
||||
|
||||
u32 queue_sync_cookie;
|
||||
atomic_t queue_sync_counter;
|
||||
/*
|
||||
* for beacon filtering -
|
||||
* currently only one interface can be supported
|
||||
|
@ -666,13 +748,8 @@ struct iwl_mvm {
|
|||
u32 hw_queue_to_mac80211;
|
||||
u8 hw_queue_refcount;
|
||||
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
|
||||
/*
|
||||
* This is to mark that queue is reserved for a STA but not yet
|
||||
* allocated. This is needed to make sure we have at least one
|
||||
* available queue to use when adding a new STA
|
||||
*/
|
||||
bool setup_reserved;
|
||||
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
|
||||
enum iwl_mvm_queue_status status;
|
||||
} queue_info[IWL_MAX_HW_QUEUES];
|
||||
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
|
||||
struct work_struct add_stream_wk; /* To add streams to queues */
|
||||
|
@ -920,6 +997,10 @@ struct iwl_mvm {
|
|||
u32 ciphers[6];
|
||||
struct iwl_mvm_tof_data tof_data;
|
||||
|
||||
struct ieee80211_vif *nan_vif;
|
||||
#define IWL_MAX_BAID 32
|
||||
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
|
||||
|
||||
/*
|
||||
* Drop beacons from other APs in AP mode when there are no connected
|
||||
* clients.
|
||||
|
@ -1065,7 +1146,8 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
|
|||
static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
|
||||
{
|
||||
return fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
|
||||
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) &&
|
||||
!IWL_MVM_HW_CSUM_DISABLE;
|
||||
}
|
||||
|
||||
static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
|
||||
|
@ -1242,7 +1324,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
|
||||
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue);
|
||||
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
|
||||
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue);
|
||||
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
|
||||
const u8 *data, u32 count);
|
||||
|
@ -1566,6 +1648,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
|
|||
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
|
||||
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
|
||||
|
||||
/* Re-configure the SCD for a queue that has already been configured */
|
||||
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
|
||||
int tid, int frame_limit, u16 ssn);
|
||||
|
||||
/* Thermal management and CT-kill */
|
||||
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
|
||||
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
|
||||
|
@ -1628,6 +1714,10 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
|
|||
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
|
||||
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
|
||||
|
||||
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_internal_rxq_notif *notif,
|
||||
u32 size);
|
||||
void iwl_mvm_reorder_timer_expired(unsigned long data);
|
||||
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
|
||||
|
||||
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
|
||||
|
|
|
@@ -554,8 +554,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;

	mvm->aux_queue = 15;
	mvm->first_agg_queue = 16;
	mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
	if (!iwl_mvm_is_dqa_supported(mvm)) {
		mvm->first_agg_queue = 16;
		mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
	} else {
		mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
		mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
	}
	if (mvm->cfg->base_params->num_of_queues == 16) {
		mvm->aux_queue = 11;
		mvm->first_agg_queue = 12;

@@ -586,6 +591,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);

	atomic_set(&mvm->queue_sync_counter, 0);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	/*

@@ -930,7 +937,7 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (pkt->hdr.cmd == FRAME_RELEASE)
		iwl_mvm_rx_frame_release(mvm, rxb, 0);
		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else

@ -1208,7 +1215,6 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
|
|||
struct iwl_d0i3_iter_data *iter_data)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct ieee80211_sta *ap_sta;
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
u32 available_tids = 0;
|
||||
u8 tid;
|
||||
|
@ -1217,11 +1223,10 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
|
|||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
|
||||
return false;
|
||||
|
||||
ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
|
||||
if (IS_ERR_OR_NULL(ap_sta))
|
||||
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
|
||||
if (!mvmsta)
|
||||
return false;
|
||||
|
||||
mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
|
||||
spin_lock_bh(&mvmsta->lock);
|
||||
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
|
||||
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
|
||||
|
@ -1632,7 +1637,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
|
|||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
|
||||
if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
|
||||
iwl_mvm_rx_frame_release(mvm, rxb, queue);
|
||||
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
|
||||
else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
|
||||
pkt->hdr.group_id == DATA_PATH_GROUP))
|
||||
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
|
||||
|
|
|
@ -97,6 +97,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
|
|||
* Adds the rxb to a new skb and give it to mac80211
|
||||
*/
|
||||
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta,
|
||||
struct napi_struct *napi,
|
||||
struct sk_buff *skb,
|
||||
struct ieee80211_hdr *hdr, u16 len,
|
||||
|
@ -131,7 +132,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
|
|||
fraglen, rxb->truesize);
|
||||
}
|
||||
|
||||
ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
|
||||
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -271,6 +272,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
u32 rate_n_flags;
|
||||
u32 rx_pkt_status;
|
||||
u8 crypt_len = 0;
|
||||
bool take_ref;
|
||||
|
||||
phy_info = &mvm->last_phy_info;
|
||||
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
|
||||
|
@ -453,8 +455,26 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
|
||||
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
|
||||
|
||||
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
|
||||
crypt_len, rxb);
|
||||
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
|
||||
ieee80211_is_probe_resp(hdr->frame_control)))
|
||||
rx_status->boottime_ns = ktime_get_boot_ns();
|
||||
|
||||
/* Take a reference briefly to kick off a d0i3 entry delay so
|
||||
* we can handle bursts of RX packets without toggling the
|
||||
* state too often. But don't do this for beacons if we are
|
||||
* going to idle because the beacon filtering changes we make
|
||||
* cause the firmware to send us collateral beacons. */
|
||||
take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
|
||||
ieee80211_is_beacon(hdr->frame_control));
|
||||
|
||||
if (take_ref)
|
||||
iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
|
||||
|
||||
iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
|
||||
ampdu_status, crypt_len, rxb);
|
||||
|
||||
if (take_ref)
|
||||
iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
|
||||
}
|
||||
|
||||
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
|
||||
|
|
|
@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
|
|||
if (iwl_mvm_check_pn(mvm, skb, queue, sta))
|
||||
kfree_skb(skb);
|
||||
else
|
||||
ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
|
||||
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
|
||||
}
|
||||
|
||||
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,

@@ -395,6 +395,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
	return ret;
}

/*
 * Returns true if sn2 - buffer_size < sn1 < sn2.
 * To be used only in order to compare reorder buffer head with NSSN.
 * We fully trust NSSN unless it is behind us due to reorder timeout.
 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
 */
static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
{
	return ieee80211_sn_less(sn1, sn2) &&
	       !ieee80211_sn_less(sn1, sn2 - buffer_size);
}

#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)

static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_reorder_buffer *reorder_buf,
				   u16 nssn)
{
	u16 ssn = reorder_buf->head_sn;

	lockdep_assert_held(&reorder_buf->lock);

	/* ignore nssn smaller than head sn - this can happen due to timeout */
	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
		return;

	while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
		int index = ssn % reorder_buf->buf_size;
		struct sk_buff_head *skb_list = &reorder_buf->entries[index];
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* holes are valid since nssn indicates frames were received. */
		if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
			continue;
		/* Empty the list. Will have more than one frame for A-MSDU */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;

	if (reorder_buf->num_stored && !reorder_buf->removed) {
		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

		while (!skb_peek_tail(&reorder_buf->entries[index]))
			index = (index + 1) % reorder_buf->buf_size;
		/* modify timer to match next frame's expiration time */
		mod_timer(&reorder_buf->reorder_timer,
			  reorder_buf->reorder_time[index] + 1 +
			  RX_REORDER_BUF_TIMEOUT_MQ);
	} else {
		del_timer(&reorder_buf->reorder_timer);
	}
}

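/*
 * Editor's note, not part of the original patch: a worked example of the
 * window arithmetic above, assuming mac80211's 12-bit SN helpers
 * (ieee80211_sn_less/ieee80211_sn_inc) and purely illustrative values.
 * With buf_size == 64 and nssn == 100, iwl_mvm_is_sn_less(head_sn, 100, 64)
 * holds for head_sn 36..99, so iwl_mvm_release_frames() walks the buffer and
 * releases stored frames until head_sn reaches 100; an NSSN that is itself
 * up to 64 SNs behind head_sn is treated as stale (a timeout-driven release
 * already moved past it) and ignored by the early return.
 */
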
void iwl_mvm_reorder_timer_expired(unsigned long data)
{
	struct iwl_mvm_reorder_buffer *buf = (void *)data;
	int i;
	u16 sn = 0, index = 0;
	bool expired = false;

	spin_lock_bh(&buf->lock);

	if (!buf->num_stored || buf->removed) {
		spin_unlock_bh(&buf->lock);
		return;
	}

	for (i = 0; i < buf->buf_size ; i++) {
		index = (buf->head_sn + i) % buf->buf_size;

		if (!skb_peek_tail(&buf->entries[index]))
			continue;
		if (!time_after(jiffies, buf->reorder_time[index] +
				RX_REORDER_BUF_TIMEOUT_MQ))
			break;
		expired = true;
		sn = ieee80211_sn_add(buf->head_sn, i + 1);
	}

	if (expired) {
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
		/* SN is set to the last expired frame + 1 */
		iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
		rcu_read_unlock();
	} else if (buf->num_stored) {
		/*
		 * If no frame expired and there are stored frames, index is now
		 * pointing to the first unexpired frame - modify timer
		 * accordingly to this frame.
		 */
		mod_timer(&buf->reorder_timer,
			  buf->reorder_time[index] +
			  1 + RX_REORDER_BUF_TIMEOUT_MQ);
	}
	spin_unlock_bh(&buf->lock);
}

static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
			   struct iwl_mvm_delba_data *data)
{
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	u8 baid = data->baid;

	if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
			       ieee80211_sn_add(reorder_buf->head_sn,
						reorder_buf->buf_size));
	spin_unlock_bh(&reorder_buf->lock);
	del_timer_sync(&reorder_buf->reorder_timer);

out:
	rcu_read_unlock();
}

void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			    int queue)
{

@@ -405,15 +549,182 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
	notif = (void *)pkt->data;
	internal_notif = (void *)notif->payload;

	if (internal_notif->sync) {
		if (mvm->queue_sync_cookie != internal_notif->cookie) {
			WARN_ONCE(1,
				  "Received expired RX queue sync message\n");
			return;
		}
		atomic_dec(&mvm->queue_sync_counter);
	}

	switch (internal_notif->type) {
	case IWL_MVM_RXQ_EMPTY:
		break;
	case IWL_MVM_RXQ_NOTIF_DEL_BA:
		/* TODO */
		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
		break;
	default:
		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
	}
}

/*
|
||||
* Returns true if the MPDU was buffered\dropped, false if it should be passed
|
||||
* to upper layer.
|
||||
*/
|
||||
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
|
||||
struct napi_struct *napi,
|
||||
int queue,
|
||||
struct ieee80211_sta *sta,
|
||||
struct sk_buff *skb,
|
||||
struct iwl_rx_mpdu_desc *desc)
|
||||
{
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_baid_data *baid_data;
|
||||
struct iwl_mvm_reorder_buffer *buffer;
|
||||
struct sk_buff *tail;
|
||||
u32 reorder = le32_to_cpu(desc->reorder_data);
|
||||
bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
|
||||
u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
|
||||
u8 sub_frame_idx = desc->amsdu_info &
|
||||
IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
|
||||
int index;
|
||||
u16 nssn, sn;
|
||||
u8 baid;
|
||||
|
||||
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
|
||||
IWL_RX_MPDU_REORDER_BAID_SHIFT;
|
||||
|
||||
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
|
||||
return false;
|
||||
|
||||
/* no sta yet */
|
||||
if (WARN_ON(IS_ERR_OR_NULL(sta)))
|
||||
return false;
|
||||
|
||||
/* not a data packet */
|
||||
if (!ieee80211_is_data_qos(hdr->frame_control) ||
|
||||
is_multicast_ether_addr(hdr->addr1))
|
||||
return false;
|
||||
|
||||
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
|
||||
return false;
|
||||
|
||||
baid_data = rcu_dereference(mvm->baid_map[baid]);
|
||||
if (WARN(!baid_data,
|
||||
"Received baid %d, but no data exists for this BAID\n", baid))
|
||||
return false;
|
||||
if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
|
||||
"baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
|
||||
baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
|
||||
tid))
|
||||
return false;
|
||||
|
||||
nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
|
||||
sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
|
||||
IWL_RX_MPDU_REORDER_SN_SHIFT;
|
||||
|
||||
buffer = &baid_data->reorder_buf[queue];
|
||||
|
||||
spin_lock_bh(&buffer->lock);
|
||||
|
||||
/*
|
||||
* If there was a significant jump in the nssn - adjust.
|
||||
* If the SN is smaller than the NSSN it might need to first go into
|
||||
* the reorder buffer, in which case we just release up to it and the
|
||||
* rest of the function will take of storing it and releasing up to the
|
||||
* nssn
|
||||
*/
|
||||
if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
|
||||
buffer->buf_size)) {
|
||||
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
|
||||
|
||||
iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
|
||||
}
|
||||
|
||||
/* drop any oudated packets */
|
||||
if (ieee80211_sn_less(sn, buffer->head_sn))
|
||||
goto drop;
|
||||
|
||||
/* release immediately if allowed by nssn and no stored frames */
|
||||
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
|
||||
if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
|
||||
buffer->buf_size))
|
||||
buffer->head_sn = nssn;
|
||||
/* No need to update AMSDU last SN - we are moving the head */
|
||||
spin_unlock_bh(&buffer->lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
index = sn % buffer->buf_size;
|
||||
|
||||
/*
|
||||
* Check if we already stored this frame
|
||||
* As AMSDU is either received or not as whole, logic is simple:
|
||||
* If we have frames in that position in the buffer and the last frame
|
||||
* originated from AMSDU had a different SN then it is a retransmission.
|
||||
* If it is the same SN then if the subframe index is incrementing it
|
||||
* is the same AMSDU - otherwise it is a retransmission.
|
||||
*/
|
||||
tail = skb_peek_tail(&buffer->entries[index]);
|
||||
if (tail && !amsdu)
|
||||
goto drop;
|
||||
else if (tail && (sn != buffer->last_amsdu ||
|
||||
buffer->last_sub_index >= sub_frame_idx))
|
||||
goto drop;
|
||||
|
||||
/* put in reorder buffer */
|
||||
__skb_queue_tail(&buffer->entries[index], skb);
|
||||
buffer->num_stored++;
|
||||
buffer->reorder_time[index] = jiffies;
|
||||
|
||||
if (amsdu) {
|
||||
buffer->last_amsdu = sn;
|
||||
buffer->last_sub_index = sub_frame_idx;
|
||||
}
|
||||
|
||||
iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
|
||||
spin_unlock_bh(&buffer->lock);
|
||||
return true;
|
||||
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
spin_unlock_bh(&buffer->lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
|
||||
{
|
||||
unsigned long now = jiffies;
|
||||
unsigned long timeout;
|
||||
struct iwl_mvm_baid_data *data;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
data = rcu_dereference(mvm->baid_map[baid]);
|
||||
if (WARN_ON(!data))
|
||||
goto out;
|
||||
|
||||
if (!data->timeout)
|
||||
goto out;
|
||||
|
||||
timeout = data->timeout;
|
||||
/*
|
||||
* Do not update last rx all the time to avoid cache bouncing
|
||||
* between the rx queues.
|
||||
* Update it every timeout. Worst case is the session will
|
||||
* expire after ~ 2 * timeout, which doesn't matter that much.
|
||||
*/
|
||||
if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
|
||||
/* Update is atomic */
|
||||
data->last_rx = now;
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue)
|
||||
{
|
||||
|
@ -484,6 +795,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
|
||||
if (sta) {
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
|
||||
IWL_RX_MPDU_REORDER_BAID_MASK) >>
|
||||
IWL_RX_MPDU_REORDER_BAID_SHIFT);
|
||||
|
||||
/*
|
||||
* We have tx blocked stations (with CS bit). If we heard
|
||||
|
@ -536,6 +850,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
|
||||
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
|
||||
}
|
||||
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
|
||||
iwl_mvm_agg_rx_received(mvm, baid);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -593,12 +909,42 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
/* TODO: PHY info - gscan */
|
||||
|
||||
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
|
||||
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
|
||||
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
|
||||
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
|
||||
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
struct iwl_rx_cmd_buffer *rxb, int queue)
|
||||
{
|
||||
/* TODO */
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
struct iwl_frame_release *release = (void *)pkt->data;
|
||||
struct ieee80211_sta *sta;
|
||||
struct iwl_mvm_reorder_buffer *reorder_buf;
|
||||
struct iwl_mvm_baid_data *ba_data;
|
||||
|
||||
int baid = release->baid;
|
||||
|
||||
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
ba_data = rcu_dereference(mvm->baid_map[baid]);
|
||||
if (WARN_ON_ONCE(!ba_data))
|
||||
goto out;
|
||||
|
||||
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
|
||||
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
|
||||
goto out;
|
||||
|
||||
reorder_buf = &ba_data->reorder_buf[queue];
|
||||
|
||||
spin_lock_bh(&reorder_buf->lock);
|
||||
iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
|
||||
le16_to_cpu(release->nssn));
|
||||
spin_unlock_bh(&reorder_buf->lock);
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
|
|
@@ -223,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
	return ret;
}

static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

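/*
 * Editor's note, not part of the original patch: the expiry check above works
 * in time units (TU, 1024 usec). The timer is armed for twice the negotiated
 * BA session timeout past the last RX; if traffic was seen meanwhile
 * (last_rx was refreshed by iwl_mvm_agg_rx_received()), it simply re-arms
 * instead of tearing the session down. As a purely illustrative example, a
 * timeout of 5000 TUs (~5.12 s) means the offloaded RX BA session is stopped
 * roughly 10.24 s after the last received frame.
 */
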
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
|
@ -293,6 +326,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
u8 mac_queue = mvmsta->vif->hw_queue[ac];
|
||||
int queue = -1;
|
||||
int ssn;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
|
@ -321,8 +355,15 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
if (queue < 0)
|
||||
queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
|
||||
IWL_MVM_DQA_MAX_DATA_QUEUE);
|
||||
|
||||
/*
|
||||
* Mark TXQ as ready, even though it hasn't been fully configured yet,
|
||||
* to make sure no one else takes it.
|
||||
* This will allow avoiding re-acquiring the lock at the end of the
|
||||
* configuration. On error we'll mark it back as free.
|
||||
*/
|
||||
if (queue >= 0)
|
||||
mvm->queue_info[queue].setup_reserved = false;
|
||||
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
|
||||
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
|
@ -354,7 +395,16 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
|
||||
spin_unlock_bh(&mvmsta->lock);
|
||||
|
||||
return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
|
||||
ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
|
||||
|
@ -460,7 +510,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
|
|||
/* Make sure we have free resources for this STA */
|
||||
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
|
||||
!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
|
||||
!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved)
|
||||
(mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
|
||||
IWL_MVM_QUEUE_FREE))
|
||||
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
|
||||
else
|
||||
queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
|
||||
|
@ -470,7 +521,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
|
|||
IWL_ERR(mvm, "No available queues for new station\n");
|
||||
return -ENOSPC;
|
||||
}
|
||||
mvm->queue_info[queue].setup_reserved = true;
|
||||
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
|
||||
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
|
@ -1000,6 +1051,33 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (iwl_mvm_is_dqa_supported(mvm)) {
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
.fifo = IWL_MVM_TX_FIFO_VO,
|
||||
.sta_id = mvmvif->bcast_sta.sta_id,
|
||||
.tid = IWL_MAX_TID_COUNT,
|
||||
.aggregate = false,
|
||||
.frame_limit = IWL_FRAME_LIMIT,
|
||||
};
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
|
||||
int queue;
|
||||
|
||||
if ((vif->type == NL80211_IFTYPE_AP) &&
|
||||
(mvmvif->bcast_sta.tfd_queue_msk &
|
||||
BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
|
||||
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
|
||||
else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
|
||||
(mvmvif->bcast_sta.tfd_queue_msk &
|
||||
BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
|
||||
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
|
||||
else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
|
||||
return -EINVAL;
|
||||
|
||||
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
|
||||
wdg_timeout);
|
||||
}
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_ADHOC)
|
||||
baddr = vif->bss_conf.bssid;
|
||||
|
||||
|
@ -1028,20 +1106,28 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
u32 qmask;
|
||||
u32 qmask = 0;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
qmask = iwl_mvm_mac_get_queues_mask(vif);
|
||||
if (!iwl_mvm_is_dqa_supported(mvm))
|
||||
qmask = iwl_mvm_mac_get_queues_mask(vif);
|
||||
|
||||
/*
|
||||
* The firmware defines the TFD queue mask to only be relevant
|
||||
* for *unicast* queues, so the multicast (CAB) queue shouldn't
|
||||
* be included.
|
||||
*/
|
||||
if (vif->type == NL80211_IFTYPE_AP)
|
||||
if (vif->type == NL80211_IFTYPE_AP) {
|
||||
/*
|
||||
* The firmware defines the TFD queue mask to only be relevant
|
||||
* for *unicast* queues, so the multicast (CAB) queue shouldn't
|
||||
* be included.
|
||||
*/
|
||||
qmask &= ~BIT(vif->cab_queue);
|
||||
|
||||
if (iwl_mvm_is_dqa_supported(mvm))
|
||||
qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
|
||||
} else if (iwl_mvm_is_dqa_supported(mvm) &&
|
||||
vif->type == NL80211_IFTYPE_P2P_DEVICE) {
|
||||
qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
|
||||
}
|
||||
|
||||
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
|
||||
ieee80211_vif_type_p2p(vif));
|
||||
}
|
||||
|
@ -1099,11 +1185,92 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
#define IWL_MAX_RX_BA_SESSIONS 16
|
||||
|
||||
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
|
||||
{
|
||||
struct iwl_mvm_delba_notif notif = {
|
||||
.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
|
||||
.metadata.sync = 1,
|
||||
.delba.baid = baid,
|
||||
};
|
||||
iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif));
|
||||
};
|
||||
|
||||
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_baid_data *data)
|
||||
{
|
||||
int i;
|
||||
|
||||
iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
|
||||
|
||||
for (i = 0; i < mvm->trans->num_rx_queues; i++) {
|
||||
int j;
|
||||
struct iwl_mvm_reorder_buffer *reorder_buf =
|
||||
&data->reorder_buf[i];
|
||||
|
||||
spin_lock_bh(&reorder_buf->lock);
|
||||
if (likely(!reorder_buf->num_stored)) {
|
||||
spin_unlock_bh(&reorder_buf->lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* This shouldn't happen in regular DELBA since the internal
|
||||
* delBA notification should trigger a release of all frames in
|
||||
* the reorder buffer.
|
||||
*/
|
||||
WARN_ON(1);
|
||||
|
||||
for (j = 0; j < reorder_buf->buf_size; j++)
|
||||
__skb_queue_purge(&reorder_buf->entries[j]);
|
||||
/*
|
||||
* Prevent timer re-arm. This prevents a very far fetched case
|
||||
* where we timed out on the notification. There may be prior
|
||||
* RX frames pending in the RX queue before the notification
|
||||
* that might get processed between now and the actual deletion
|
||||
* and we would re-arm the timer although we are deleting the
|
||||
* reorder buffer.
|
||||
*/
|
||||
reorder_buf->removed = true;
|
||||
spin_unlock_bh(&reorder_buf->lock);
|
||||
del_timer_sync(&reorder_buf->reorder_timer);
|
||||
}
|
||||
}
|
||||
|
||||
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
|
||||
u32 sta_id,
|
||||
struct iwl_mvm_baid_data *data,
|
||||
u16 ssn, u8 buf_size)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mvm->trans->num_rx_queues; i++) {
|
||||
struct iwl_mvm_reorder_buffer *reorder_buf =
|
||||
&data->reorder_buf[i];
|
||||
int j;
|
||||
|
||||
reorder_buf->num_stored = 0;
|
||||
reorder_buf->head_sn = ssn;
|
||||
reorder_buf->buf_size = buf_size;
|
||||
/* rx reorder timer */
|
||||
reorder_buf->reorder_timer.function =
|
||||
iwl_mvm_reorder_timer_expired;
|
||||
reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
|
||||
init_timer(&reorder_buf->reorder_timer);
|
||||
spin_lock_init(&reorder_buf->lock);
|
||||
reorder_buf->mvm = mvm;
|
||||
reorder_buf->queue = i;
|
||||
reorder_buf->sta_id = sta_id;
|
||||
for (j = 0; j < reorder_buf->buf_size; j++)
|
||||
__skb_queue_head_init(&reorder_buf->entries[j]);
|
||||
}
|
||||
}
|
||||
|
||||
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
int tid, u16 ssn, bool start, u8 buf_size)
|
||||
int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
|
||||
{
|
||||
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_add_sta_cmd cmd = {};
|
||||
struct iwl_mvm_baid_data *baid_data = NULL;
|
||||
int ret;
|
||||
u32 status;
|
||||
|
||||
|
@ -1114,6 +1281,19 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|||
return -ENOSPC;
|
||||
}
|
||||
|
||||
if (iwl_mvm_has_new_rx_api(mvm) && start) {
|
||||
/*
|
||||
* Allocate here so if allocation fails we can bail out early
|
||||
* before starting the BA session in the firmware
|
||||
*/
|
||||
baid_data = kzalloc(sizeof(*baid_data) +
|
||||
mvm->trans->num_rx_queues *
|
||||
sizeof(baid_data->reorder_buf[0]),
|
||||
GFP_KERNEL);
|
||||
if (!baid_data)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
|
||||
cmd.sta_id = mvm_sta->sta_id;
|
||||
cmd.add_modify = STA_MODE_MODIFY;
|
||||
|
@ -1132,7 +1312,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|||
iwl_mvm_add_sta_cmd_size(mvm),
|
||||
&cmd, &status);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto out_free;
|
||||
|
||||
switch (status & IWL_ADD_STA_STATUS_MASK) {
|
||||
case ADD_STA_SUCCESS:
|
||||
|
@ -1150,14 +1330,75 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|||
break;
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
if (start)
|
||||
mvm->rx_ba_sessions++;
|
||||
else if (mvm->rx_ba_sessions > 0)
|
||||
/* check that restart flow didn't zero the counter */
|
||||
mvm->rx_ba_sessions--;
|
||||
}
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
if (start) {
|
||||
u8 baid;
|
||||
|
||||
mvm->rx_ba_sessions++;
|
||||
|
||||
if (!iwl_mvm_has_new_rx_api(mvm))
|
||||
return 0;
|
||||
|
||||
if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
|
||||
IWL_ADD_STA_BAID_SHIFT);
|
||||
baid_data->baid = baid;
|
||||
baid_data->timeout = timeout;
|
||||
baid_data->last_rx = jiffies;
|
||||
init_timer(&baid_data->session_timer);
|
||||
baid_data->session_timer.function =
|
||||
iwl_mvm_rx_agg_session_expired;
|
||||
baid_data->session_timer.data =
|
||||
(unsigned long)&mvm->baid_map[baid];
|
||||
baid_data->mvm = mvm;
|
||||
baid_data->tid = tid;
|
||||
baid_data->sta_id = mvm_sta->sta_id;
|
||||
|
||||
mvm_sta->tid_to_baid[tid] = baid;
|
||||
if (timeout)
|
||||
mod_timer(&baid_data->session_timer,
|
||||
TU_TO_EXP_TIME(timeout * 2));
|
||||
|
||||
iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
|
||||
baid_data, ssn, buf_size);
|
||||
/*
|
||||
* protect the BA data with RCU to cover a case where our
|
||||
* internal RX sync mechanism will timeout (not that it's
|
||||
* supposed to happen) and we will free the session data while
|
||||
* RX is being processed in parallel
|
||||
*/
|
||||
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
|
||||
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
|
||||
} else if (mvm->rx_ba_sessions > 0) {
|
||||
u8 baid = mvm_sta->tid_to_baid[tid];
|
||||
|
||||
/* check that restart flow didn't zero the counter */
|
||||
mvm->rx_ba_sessions--;
|
||||
if (!iwl_mvm_has_new_rx_api(mvm))
|
||||
return 0;
|
||||
|
||||
if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
|
||||
return -EINVAL;
|
||||
|
||||
baid_data = rcu_access_pointer(mvm->baid_map[baid]);
|
||||
if (WARN_ON(!baid_data))
|
||||
return -EINVAL;
|
||||
|
||||
/* synchronize all rx queues so we can safely delete */
|
||||
iwl_mvm_free_reorder(mvm, baid_data);
|
||||
del_timer_sync(&baid_data->session_timer);
|
||||
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
|
||||
kfree_rcu(baid_data, rcu_head);
|
||||
}
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
kfree(baid_data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1175,7 +1416,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|||
mvm_sta->tfd_queue_msk |= BIT(queue);
|
||||
mvm_sta->tid_disable_agg &= ~BIT(tid);
|
||||
} else {
|
||||
mvm_sta->tfd_queue_msk &= ~BIT(queue);
|
||||
/* In DQA-mode the queue isn't removed on agg termination */
|
||||
if (!iwl_mvm_is_dqa_supported(mvm))
|
||||
mvm_sta->tfd_queue_msk &= ~BIT(queue);
|
||||
mvm_sta->tid_disable_agg |= BIT(tid);
|
||||
}
|
||||
|
||||
|
@ -1258,17 +1501,35 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
|
||||
txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
|
||||
mvm->last_agg_queue);
|
||||
if (txq_id < 0) {
|
||||
ret = txq_id;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
IWL_ERR(mvm, "Failed to allocate agg queue\n");
|
||||
goto release_locks;
|
||||
/*
|
||||
* Note the possible cases:
|
||||
* 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
|
||||
* 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
|
||||
* one and mark it as reserved
|
||||
* 3. In DQA mode, but no traffic yet on this TID: same treatment as in
|
||||
* non-DQA mode, since the TXQ hasn't yet been allocated
|
||||
*/
|
||||
txq_id = mvmsta->tid_data[tid].txq_id;
|
||||
if (!iwl_mvm_is_dqa_supported(mvm) ||
|
||||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
|
||||
txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
|
||||
mvm->last_agg_queue);
|
||||
if (txq_id < 0) {
|
||||
ret = txq_id;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
IWL_ERR(mvm, "Failed to allocate agg queue\n");
|
||||
goto release_locks;
|
||||
}
|
||||
|
||||
/* TXQ hasn't yet been enabled, so mark it only as reserved */
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
|
||||
}
|
||||
mvm->queue_info[txq_id].setup_reserved = true;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"AGG for tid %d will be on queue #%d\n",
|
||||
tid, txq_id);
|
||||
|
||||
tid_data = &mvmsta->tid_data[tid];
|
||||
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
|
||||
tid_data->txq_id = txq_id;
|
||||
|
@ -1303,6 +1564,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
|
||||
int queue, ret;
|
||||
bool alloc_queue = true;
|
||||
u16 ssn;
|
||||
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
|
@ -1328,8 +1590,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
|
||||
|
||||
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
|
||||
ssn, &cfg, wdg_timeout);
|
||||
/* In DQA mode, the existing queue might need to be reconfigured */
|
||||
if (iwl_mvm_is_dqa_supported(mvm)) {
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
/* Maybe there is no need to even alloc a queue... */
|
||||
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
|
||||
alloc_queue = false;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
/*
|
||||
* Only reconfig the SCD for the queue if the window size has
|
||||
* changed from current (become smaller)
|
||||
*/
|
||||
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
|
||||
/*
|
||||
* If reconfiguring an existing queue, it first must be
|
||||
* drained
|
||||
*/
|
||||
ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
|
||||
BIT(queue));
|
||||
if (ret) {
|
||||
IWL_ERR(mvm,
|
||||
"Error draining queue before reconfig\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
|
||||
mvmsta->sta_id, tid,
|
||||
buf_size, ssn);
|
||||
if (ret) {
|
||||
IWL_ERR(mvm,
|
||||
"Error reconfiguring TXQ #%d\n", queue);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (alloc_queue)
|
||||
iwl_mvm_enable_txq(mvm, queue,
|
||||
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
|
||||
&cfg, wdg_timeout);
|
||||
|
||||
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
|
||||
if (ret)
|
||||
|
@ -1337,7 +1637,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
/* No need to mark as reserved */
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
mvm->queue_info[queue].setup_reserved = false;
|
||||
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
/*
|
||||
|
@ -1384,9 +1684,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
mvmsta->agg_tids &= ~BIT(tid);
|
||||
|
||||
/* No need to mark as reserved anymore */
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
mvm->queue_info[txq_id].setup_reserved = false;
|
||||
/*
|
||||
* The TXQ is marked as reserved only if no traffic came through yet
|
||||
* This means no traffic has been sent on this TID (agg'd or not), so
|
||||
* we no longer have use for the queue. Since it hasn't even been
|
||||
* allocated through iwl_mvm_enable_txq, so we can just mark it back as
|
||||
* free.
|
||||
*/
|
||||
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
switch (tid_data->state) {
|
||||
|
@ -1412,9 +1719,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
|
||||
|
||||
iwl_mvm_disable_txq(mvm, txq_id,
|
||||
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
|
||||
0);
|
||||
if (!iwl_mvm_is_dqa_supported(mvm)) {
|
||||
int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
|
||||
|
||||
iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
|
||||
}
|
||||
return 0;
|
||||
case IWL_AGG_STARTING:
|
||||
case IWL_EMPTYING_HW_QUEUE_ADDBA:
|
||||
|
@ -1465,9 +1774,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
mvmsta->agg_tids &= ~BIT(tid);
|
||||
spin_unlock_bh(&mvmsta->lock);
|
||||
|
||||
/* No need to mark as reserved */
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
mvm->queue_info[txq_id].setup_reserved = false;
|
||||
/*
|
||||
* The TXQ is marked as reserved only if no traffic came through yet
|
||||
* This means no traffic has been sent on this TID (agg'd or not), so
|
||||
* we no longer have use for the queue. Since it hasn't even been
|
||||
* allocated through iwl_mvm_enable_txq, so we can just mark it back as
|
||||
* free.
|
||||
*/
|
||||
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
if (old_state >= IWL_AGG_ON) {
|
||||
|
@ -1480,9 +1796,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
|
||||
|
||||
iwl_mvm_disable_txq(mvm, tid_data->txq_id,
|
||||
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
|
||||
0);
|
||||
if (!iwl_mvm_is_dqa_supported(mvm)) {
|
||||
int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
|
||||
|
||||
iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
|
||||
tid, 0);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1533,17 +1852,12 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
|
|||
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
|
||||
u8 sta_id = mvmvif->ap_sta_id;
|
||||
|
||||
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
|
||||
lockdep_is_held(&mvm->mutex));
|
||||
/*
|
||||
* It is possible that the 'sta' parameter is NULL,
|
||||
* for example when a GTK is removed - the sta_id will then
|
||||
* be the AP ID, and no station was passed by mac80211.
|
||||
*/
|
||||
if (IS_ERR_OR_NULL(sta))
|
||||
return NULL;
|
||||
|
||||
return iwl_mvm_sta_from_mac80211(sta);
|
||||
return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
|
|
@@ -348,6 +348,15 @@ struct iwl_mvm_key_pn {
	} ____cacheline_aligned_in_smp q[];
};

struct iwl_mvm_delba_data {
	u32 baid;
} __packed;

struct iwl_mvm_delba_notif {
	struct iwl_mvm_internal_rxq_notif metadata;
	struct iwl_mvm_delba_data delba;
} __packed;

/**
|
||||
* struct iwl_mvm_rxq_dup_data - per station per rx queue data
|
||||
* @last_seq: last sequence per tid for duplicate packet detection
|
||||
|
@ -373,6 +382,7 @@ struct iwl_mvm_rxq_dup_data {
|
|||
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
|
||||
* and from Tx response flow, it needs a spinlock.
|
||||
* @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
|
||||
* @tid_to_baid: a simple map of TID to baid
|
||||
* @reserved_queue: the queue reserved for this STA for DQA purposes
|
||||
* Every STA has is given one reserved queue to allow it to operate. If no
|
||||
* such queue can be guaranteed, the STA addition will fail.
|
||||
|
@ -406,6 +416,7 @@ struct iwl_mvm_sta {
|
|||
bool next_status_eosp;
|
||||
spinlock_t lock;
|
||||
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
|
||||
u8 tid_to_baid[IWL_MAX_TID_COUNT];
|
||||
struct iwl_lq_sta lq_sta;
|
||||
struct ieee80211_vif *vif;
|
||||
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
|
||||
|
@ -487,7 +498,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
|
|||
|
||||
/* AMPDU */
|
||||
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
int tid, u16 ssn, bool start, u8 buf_size);
|
||||
int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
|
||||
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
|
||||
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
|
|
|
@ -359,16 +359,14 @@ static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
|
|||
|
||||
static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
|
||||
{
|
||||
struct ieee80211_sta *sta;
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
int i, err;
|
||||
|
||||
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
|
||||
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
|
||||
lockdep_is_held(&mvm->mutex));
|
||||
if (IS_ERR_OR_NULL(sta))
|
||||
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
|
||||
if (!mvmsta)
|
||||
continue;
|
||||
mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
|
||||
if (enable == mvmsta->tt_tx_protection)
|
||||
continue;
|
||||
err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
|
||||
|
|
|
@ -211,6 +211,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
struct iwl_tx_cmd *tx_cmd,
|
||||
struct ieee80211_tx_info *info, u8 sta_id)
|
||||
{
|
||||
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_hdr *hdr = (void *)skb->data;
|
||||
__le16 fc = hdr->frame_control;
|
||||
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
|
||||
|
@ -294,7 +295,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
tx_cmd->tx_flags = cpu_to_le32(tx_flags);
|
||||
/* Total # bytes to be transmitted */
|
||||
tx_cmd->len = cpu_to_le16((u16)skb->len +
|
||||
(uintptr_t)info->driver_data[0]);
|
||||
(uintptr_t)skb_info->driver_data[0]);
|
||||
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
|
||||
tx_cmd->sta_id = sta_id;
|
||||
|
||||
|
@ -442,10 +443,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
|
|||
*/
|
||||
static struct iwl_device_cmd *
|
||||
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
|
||||
struct ieee80211_tx_info *info, int hdrlen,
|
||||
struct ieee80211_sta *sta, u8 sta_id)
|
||||
{
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
|
||||
struct iwl_device_cmd *dev_cmd;
|
||||
struct iwl_tx_cmd *tx_cmd;
|
||||
|
||||
|
@ -465,33 +467,52 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
|
||||
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
|
||||
|
||||
memset(&info->status, 0, sizeof(info->status));
|
||||
memset(info->driver_data, 0, sizeof(info->driver_data));
|
||||
memset(&skb_info->status, 0, sizeof(skb_info->status));
|
||||
memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
|
||||
|
||||
info->driver_data[1] = dev_cmd;
|
||||
skb_info->driver_data[1] = dev_cmd;
|
||||
|
||||
return dev_cmd;
|
||||
}
|
||||
|
||||
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
|
||||
struct ieee80211_tx_info *info, __le16 fc)
|
||||
{
|
||||
if (iwl_mvm_is_dqa_supported(mvm)) {
|
||||
if (info->control.vif->type == NL80211_IFTYPE_AP &&
|
||||
ieee80211_is_probe_resp(fc))
|
||||
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
|
||||
else if (ieee80211_is_mgmt(fc) &&
|
||||
info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
|
||||
return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
|
||||
}
|
||||
|
||||
return info->hw_queue;
|
||||
}
|
||||
|
||||
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_info info;
|
||||
struct iwl_device_cmd *dev_cmd;
|
||||
struct iwl_tx_cmd *tx_cmd;
|
||||
u8 sta_id;
|
||||
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
|
||||
int queue;
|
||||
|
||||
if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
|
||||
memcpy(&info, skb->cb, sizeof(info));
|
||||
|
||||
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
|
||||
return -1;
|
||||
|
||||
if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
|
||||
(!info->control.vif ||
|
||||
info->hw_queue != info->control.vif->cab_queue)))
|
||||
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
|
||||
(!info.control.vif ||
|
||||
info.hw_queue != info.control.vif->cab_queue)))
|
||||
return -1;
|
||||
|
||||
/* This holds the amsdu headers length */
|
||||
info->driver_data[0] = (void *)(uintptr_t)0;
|
||||
skb_info->driver_data[0] = (void *)(uintptr_t)0;
|
||||
|
||||
/*
|
||||
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
|
||||
|
@ -500,9 +521,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
* and hence needs to be sent on the aux queue
|
||||
*/
|
||||
if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
|
||||
info->control.vif->type == NL80211_IFTYPE_STATION)
|
||||
info.control.vif->type == NL80211_IFTYPE_STATION)
|
||||
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
|
||||
|
||||
queue = info.hw_queue;
|
||||
|
||||
/*
|
||||
* If the interface on which the frame is sent is the P2P_DEVICE
|
||||
* or an AP/GO interface use the broadcast station associated
|
||||
|
@ -513,15 +536,17 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
* AUX station.
|
||||
*/
|
||||
sta_id = mvm->aux_sta.sta_id;
|
||||
if (info->control.vif) {
|
||||
if (info.control.vif) {
|
||||
struct iwl_mvm_vif *mvmvif =
|
||||
iwl_mvm_vif_from_mac80211(info->control.vif);
|
||||
iwl_mvm_vif_from_mac80211(info.control.vif);
|
||||
|
||||
if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
|
||||
info->control.vif->type == NL80211_IFTYPE_AP)
|
||||
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
|
||||
info.control.vif->type == NL80211_IFTYPE_AP) {
|
||||
sta_id = mvmvif->bcast_sta.sta_id;
|
||||
else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
|
||||
is_multicast_ether_addr(hdr->addr1)) {
|
||||
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
|
||||
hdr->frame_control);
|
||||
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
|
||||
is_multicast_ether_addr(hdr->addr1)) {
|
||||
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
|
||||
|
||||
if (ap_sta_id != IWL_MVM_STATION_COUNT)
|
||||
|
@ -529,19 +554,18 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
}
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
|
||||
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
|
||||
|
||||
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
|
||||
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
|
||||
if (!dev_cmd)
|
||||
return -1;
|
||||
|
||||
/* From now on, we cannot access info->control */
|
||||
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
|
||||
|
||||
/* Copy MAC header from skb into command buffer */
|
||||
memcpy(tx_cmd->hdr, hdr, hdrlen);
|
||||
|
||||
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
|
||||
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
|
||||
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
|
||||
return -1;
|
||||
}
|
||||
|
@ -560,11 +584,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
|
||||
#ifdef CONFIG_INET
|
||||
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
struct ieee80211_tx_info *info,
|
||||
struct ieee80211_sta *sta,
|
||||
struct sk_buff_head *mpdus_skb)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_hdr *hdr = (void *)skb->data;
|
||||
unsigned int mss = skb_shinfo(skb)->gso_size;
|
||||
struct sk_buff *tmp, *next;
|
||||
|
@ -585,9 +609,11 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
|
||||
return -EINVAL;
|
||||
|
||||
dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
|
||||
|
||||
if (!sta->max_amsdu_len ||
|
||||
!ieee80211_is_data_qos(hdr->frame_control) ||
|
||||
!mvmsta->tlc_amsdu) {
|
||||
(!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
|
||||
num_subframes = 1;
|
||||
pad = 0;
|
||||
goto segment;
|
||||
|
@ -618,7 +644,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
}
|
||||
|
||||
max_amsdu_len = sta->max_amsdu_len;
|
||||
dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
|
||||
|
||||
/* the Tx FIFO to which this A-MSDU will be routed */
|
||||
txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
|
||||
|
@ -632,7 +657,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
|
||||
mvm->shared_mem_cfg.txfifo_size[txf] - 256);
|
||||
|
||||
if (dbg_max_amsdu_len)
|
||||
if (unlikely(dbg_max_amsdu_len))
|
||||
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
|
||||
dbg_max_amsdu_len);
|
||||
|
||||
|
@ -673,6 +698,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
|
||||
/* This skb fits in one single A-MSDU */
|
||||
if (num_subframes * mss >= tcp_payload_len) {
|
||||
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
|
||||
|
||||
/*
|
||||
* Compute the length of all the data added for the A-MSDU.
|
||||
* This will be used to compute the length to write in the TX
|
||||
|
@ -681,11 +708,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
* already had one set of SNAP / IP / TCP headers.
|
||||
*/
|
||||
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
|
||||
info = IEEE80211_SKB_CB(skb);
|
||||
amsdu_add = num_subframes * sizeof(struct ethhdr) +
|
||||
(num_subframes - 1) * (snap_ip_tcp + pad);
|
||||
/* This holds the amsdu headers length */
|
||||
info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
|
||||
skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
|
||||
|
||||
__skb_queue_tail(mpdus_skb, skb);
|
||||
return 0;
|
||||
|
@ -725,11 +751,14 @@ segment:
|
|||
ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
|
||||
|
||||
if (tcp_payload_len > mss) {
|
||||
struct ieee80211_tx_info *skb_info =
|
||||
IEEE80211_SKB_CB(tmp);
|
||||
|
||||
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
|
||||
info = IEEE80211_SKB_CB(tmp);
|
||||
amsdu_add = num_subframes * sizeof(struct ethhdr) +
|
||||
(num_subframes - 1) * (snap_ip_tcp + pad);
|
||||
info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
|
||||
skb_info->driver_data[0] =
|
||||
(void *)(uintptr_t)amsdu_add;
|
||||
skb_shinfo(tmp)->gso_size = mss;
|
||||
} else {
|
||||
qc = ieee80211_get_qos_ctl((void *)tmp->data);
|
||||
|
@ -751,6 +780,7 @@ segment:
|
|||
}
|
||||
#else /* CONFIG_INET */
|
||||
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
struct ieee80211_tx_info *info,
|
||||
struct ieee80211_sta *sta,
|
||||
struct sk_buff_head *mpdus_skb)
|
||||
{
|
||||
|
@ -794,10 +824,10 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
|
|||
* Sets the fields in the Tx cmd that are crypto related
|
||||
*/
|
||||
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
struct ieee80211_tx_info *info,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
struct iwl_device_cmd *dev_cmd;
|
||||
struct iwl_tx_cmd *tx_cmd;
|
||||
|
@ -818,7 +848,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
|
||||
return -1;
|
||||
|
||||
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
|
||||
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
|
||||
sta, mvmsta->sta_id);
|
||||
if (!dev_cmd)
|
||||
goto drop;
|
||||
|
||||
|
@ -902,7 +933,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,

    spin_unlock(&mvmsta->lock);

    if (txq_id < mvm->first_agg_queue)
    /* Increase pending frames count if this isn't AMPDU */
    if (!is_ampdu)
        atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);

    return 0;

@ -918,7 +950,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
                   struct ieee80211_sta *sta)
{
    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
    struct ieee80211_tx_info info;
    struct sk_buff_head mpdus_skbs;
    unsigned int payload_len;
    int ret;

@ -929,21 +962,23 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
    if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
        return -1;

    memcpy(&info, skb->cb, sizeof(info));

    /* This holds the amsdu headers length */
    info->driver_data[0] = (void *)(uintptr_t)0;
    skb_info->driver_data[0] = (void *)(uintptr_t)0;

    if (!skb_is_gso(skb))
        return iwl_mvm_tx_mpdu(mvm, skb, sta);
        return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

    payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
        tcp_hdrlen(skb) + skb->data_len;

    if (payload_len <= skb_shinfo(skb)->gso_size)
        return iwl_mvm_tx_mpdu(mvm, skb, sta);
        return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

    __skb_queue_head_init(&mpdus_skbs);

    ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs);
    ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
    if (ret)
        return ret;

@ -953,7 +988,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
    while (!skb_queue_empty(&mpdus_skbs)) {
        skb = __skb_dequeue(&mpdus_skbs);

        ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
        ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
        if (ret) {
            __skb_queue_purge(&mpdus_skbs);
            return ret;

@ -1147,6 +1182,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
    u8 skb_freed = 0;
    u16 next_reclaimed, seq_ctl;
    bool is_ndp = false;
    bool txq_agg = false; /* Is this TXQ aggregated */

    __skb_queue_head_init(&skbs);

@ -1277,6 +1313,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
    bool send_eosp_ndp = false;

    spin_lock_bh(&mvmsta->lock);
    txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON);

    if (!is_ndp) {
        tid_data->next_reclaimed = next_reclaimed;
        IWL_DEBUG_TX_REPLY(mvm,

@ -1332,11 +1370,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
     * If the txq is not an AMPDU queue, there is no chance we freed
     * several skbs. Check that out...
     */
    if (txq_id >= mvm->first_agg_queue)
    if (txq_agg)
        goto out;

    /* We can't free more than one frame at once on a shared queue */
    WARN_ON(skb_freed > 1);
    WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));

    /* If we have still frames for this STA nothing to do here */
    if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))

@ -1430,9 +1468,12 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
    int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
    int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
    u16 sequence = le16_to_cpu(pkt->hdr.sequence);
    struct ieee80211_sta *sta;
    struct iwl_mvm_sta *mvmsta;
    int queue = SEQ_TO_QUEUE(sequence);

    if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
    if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
                     (!iwl_mvm_is_dqa_supported(mvm) ||
                     (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
        return;

    if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))

@ -1442,10 +1483,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,

    rcu_read_lock();

    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
    mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);

    if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    if (!WARN_ON_ONCE(!mvmsta)) {
        mvmsta->tid_data[tid].rate_n_flags =
            le32_to_cpu(tx_resp->initial_rate);
        mvmsta->tid_data[tid].tx_time =

@ -90,11 +90,17 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
     * the mutex, this ensures we don't try to send two
     * (or more) synchronous commands at a time.
     */
    if (!(cmd->flags & CMD_ASYNC))
    if (!(cmd->flags & CMD_ASYNC)) {
        lockdep_assert_held(&mvm->mutex);
        if (!(cmd->flags & CMD_SEND_IN_IDLE))
            iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
    }

    ret = iwl_trans_send_cmd(mvm->trans, cmd);

    if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
        iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);

    /*
     * If the caller wants the SKB, then don't hide any problems, the
     * caller might access the response buffer which will be NULL if

@ -581,12 +587,45 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)

    for (i = minq; i <= maxq; i++)
        if (mvm->queue_info[i].hw_queue_refcount == 0 &&
            !mvm->queue_info[i].setup_reserved)
            mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
            return i;

    return -ENOSPC;
}

int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
                         int tid, int frame_limit, u16 ssn)
{
    struct iwl_scd_txq_cfg_cmd cmd = {
        .scd_queue = queue,
        .enable = 1,
        .window = frame_limit,
        .sta_id = sta_id,
        .ssn = cpu_to_le16(ssn),
        .tx_fifo = fifo,
        .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
        .tid = tid,
    };
    int ret;

    spin_lock_bh(&mvm->queue_info_lock);
    if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
             "Trying to reconfig unallocated queue %d\n", queue)) {
        spin_unlock_bh(&mvm->queue_info_lock);
        return -ENXIO;
    }
    spin_unlock_bh(&mvm->queue_info_lock);

    IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
    WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
              queue, fifo, ret);

    return ret;
}

void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                        u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
                        unsigned int wdg_timeout)

@ -682,6 +721,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
    mvm->queue_info[queue].hw_queue_refcount--;

    cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
    if (!cmd.enable)
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

    IWL_DEBUG_TX_QUEUES(mvm,
                        "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",

@ -479,23 +479,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
    {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
    {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},

    /* 9000 Series */
    {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)},
    {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
#endif /* CONFIG_IWLMVM */

    {0}

@ -585,6 +596,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
    const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
    const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
    struct iwl_trans *iwl_trans;
    struct iwl_trans_pcie *trans_pcie;
    int ret;

@ -612,6 +624,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        cfg = cfg_7265d;
        iwl_trans->cfg = cfg_7265d;
    }

    if (iwl_trans->cfg->rf_id) {
        if (cfg == &iwl9260_2ac_cfg)
            cfg_9260lc = &iwl9260lc_2ac_cfg;
        if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
            cfg = cfg_9260lc;
            iwl_trans->cfg = cfg_9260lc;
        }
    }
#endif

    pci_set_drvdata(pdev, iwl_trans);

@ -481,9 +481,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
    struct iwl_tfd_tb *tb = &tfd->tbs[idx];

@ -161,10 +161,11 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
    return cpu_to_le32((u32)(dma_addr >> 8));
}

static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
                                           u64 val)
{
    iwl_write_prph(trans, ofs, val & 0xffffffff);
    iwl_write_prph(trans, ofs + 4, val >> 32);
    iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
    iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}

/*

@ -208,8 +209,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,

    rxq->write_actual = round_down(rxq->write, 8);
    if (trans->cfg->mq_rx_supported)
        iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
                       rxq->write_actual);
        iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
                    rxq->write_actual);
    /*
     * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to
     * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will

@ -698,6 +699,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
    u32 rb_size;
    unsigned long flags;
    const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

    switch (trans_pcie->rx_buf_size) {

@ -715,23 +717,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
        rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
    }

    if (!iwl_trans_grab_nic_access(trans, &flags))
        return;

    /* Stop Rx DMA */
    iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
    iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
    /* reset and flush pointers */
    iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
    iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
    iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
    iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
    iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
    iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

    /* Reset driver's Rx queue write index */
    iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
    iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

    /* Tell device where to find RBD circular buffer in DRAM */
    iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                       (u32)(rxq->bd_dma >> 8));
    iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                (u32)(rxq->bd_dma >> 8));

    /* Tell device where in DRAM to update its Rx status */
    iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                       rxq->rb_stts_dma >> 4);
    iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                rxq->rb_stts_dma >> 4);

    /* Enable Rx DMA
     * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in

@ -741,13 +746,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
     * RB timeout 0x10
     * 256 RBDs
     */
    iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                       FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                       FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                       FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                       rb_size|
                       (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
                       (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
    iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                rb_size |
                (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

    iwl_trans_release_nic_access(trans, &flags);

    /* Set interrupt coalescing timer to default (2048 usecs) */
    iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

@ -761,6 +768,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
    u32 rb_size, enabled = 0;
    unsigned long flags;
    int i;

    switch (trans_pcie->rx_buf_size) {

@ -778,25 +786,31 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
        rb_size = RFH_RXF_DMA_RB_SIZE_4K;
    }

    if (!iwl_trans_grab_nic_access(trans, &flags))
        return;

    /* Stop Rx DMA */
    iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
    iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
    /* disable free amd used rx queue operation */
    iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
    iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

    for (i = 0; i < trans->num_rx_queues; i++) {
        /* Tell device where to find RBD free table in DRAM */
        iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
                               (u64)(trans_pcie->rxq[i].bd_dma));
        iwl_pcie_write_prph_64_no_grab(trans,
                                       RFH_Q_FRBDCB_BA_LSB(i),
                                       trans_pcie->rxq[i].bd_dma);
        /* Tell device where to find RBD used table in DRAM */
        iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
                               (u64)(trans_pcie->rxq[i].used_bd_dma));
        iwl_pcie_write_prph_64_no_grab(trans,
                                       RFH_Q_URBDCB_BA_LSB(i),
                                       trans_pcie->rxq[i].used_bd_dma);
        /* Tell device where in DRAM to update its Rx status */
        iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
                               trans_pcie->rxq[i].rb_stts_dma);
        iwl_pcie_write_prph_64_no_grab(trans,
                                       RFH_Q_URBD_STTS_WPTR_LSB(i),
                                       trans_pcie->rxq[i].rb_stts_dma);
        /* Reset device indice tables */
        iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
        iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
        iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
        iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
        iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
        iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

        enabled |= BIT(i) | BIT(i + 16);
    }

@ -812,23 +826,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
     * Drop frames that exceed RB size
     * 512 RBDs
     */
    iwl_write_prph(trans, RFH_RXF_DMA_CFG,
                   RFH_DMA_EN_ENABLE_VAL |
                   rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
                   RFH_RXF_DMA_MIN_RB_4_8 |
                   RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
                   RFH_RXF_DMA_RBDCB_SIZE_512);
    iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
                           RFH_DMA_EN_ENABLE_VAL |
                           rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
                           RFH_RXF_DMA_MIN_RB_4_8 |
                           RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
                           RFH_RXF_DMA_RBDCB_SIZE_512);

    /*
     * Activate DMA snooping.
     * Set RX DMA chunk size to 64B
     * Default queue is 0
     */
    iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
                   (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
                   RFH_GEN_CFG_SERVICE_DMA_SNOOP);
    iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
                           (DEFAULT_RXQ_NUM <<
                            RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
                           RFH_GEN_CFG_SERVICE_DMA_SNOOP);
    /* Enable the relevant rx queues */
    iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
    iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

    iwl_trans_release_nic_access(trans, &flags);

    /* Set interrupt coalescing timer to default (2048 usecs) */
    iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

@ -1298,7 +1315,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
     * write 1 clear (W1C) register, meaning that it's being clear
     * by writing 1 to the bit.
     */
    iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
    iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

/*

@ -1817,13 +1834,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
    lock_map_acquire(&trans->sync_cmd_lockdep_map);

    spin_lock(&trans_pcie->irq_lock);
    inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
    inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
    inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
    inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
    /*
     * Clear causes registers to avoid being handling the same cause.
     */
    iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
    iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
    iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
    iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
    spin_unlock(&trans_pcie->irq_lock);

    if (unlikely(!(inta_fh | inta_hw))) {

@ -269,9 +269,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
    iwl_pcie_apm_config(trans);

    /* Configure analog phase-lock-loop before activating to D0A */
    if (trans->cfg->base_params->pll_cfg_val)
        iwl_set_bit(trans, CSR_ANA_PLL_CFG,
                    trans->cfg->base_params->pll_cfg_val);
    if (trans->cfg->base_params->pll_cfg)
        iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

    /*
     * Set "initialization complete" bit to move adapter from

@ -361,8 +360,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)

    /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
    iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

    udelay(10);
    usleep_range(1000, 2000);

    /*
     * Set "initialization complete" bit to move adapter from

@ -408,8 +406,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
     * SHRD_HW_RST). Turn MAC off before proceeding.
     */
    iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

    udelay(10);
    usleep_range(1000, 2000);

    /* Enable LP XTAL by indirect access through CSR */
    apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);

@ -506,8 +503,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)

    /* Reset the entire device */
    iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

    udelay(10);
    usleep_range(1000, 2000);

    /*
     * Clear "initialization complete" bit to move adapter from

@ -586,7 +582,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)

    iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                CSR_RESET_LINK_PWR_MGMT_DISABLED);
    msleep(1);
    usleep_range(1000, 2000);

    for (iter = 0; iter < 10; iter++) {
        /* If HW is not ready, prepare the conditions to check again */

@ -1074,7 +1070,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)

    /* stop and reset the on-board processor */
    iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
    udelay(20);
    usleep_range(1000, 2000);

    /*
     * Upon stop, the APM issues an interrupt if HW RF kill is set.

@ -1526,8 +1522,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)

    /* Reset the entire device */
    iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

    usleep_range(10, 15);
    usleep_range(1000, 2000);

    iwl_pcie_apm_init(trans);

@ -1950,7 +1945,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
            "WR pointer moved while flushing %d -> %d\n",
            wr_ptr, write_ptr))
            return -ETIMEDOUT;
        msleep(1);
        usleep_range(1000, 2000);
    }

    if (q->read_ptr != q->write_ptr) {

@ -2013,7 +2008,7 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
    spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

void iwl_trans_pcie_ref(struct iwl_trans *trans)
static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

@ -2028,7 +2023,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans)
#endif /* CONFIG_PM */
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

@ -2907,6 +2902,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        }
    }

    trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);

    iwl_pcie_set_interrupt_capa(pdev, trans);
    trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
    snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),

@ -32,6 +32,7 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

@ -605,7 +606,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
    if (trans_pcie->ref_cmd_in_flight) {
        trans_pcie->ref_cmd_in_flight = false;
        IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
        iwl_trans_pcie_unref(trans);
        iwl_trans_unref(trans);
    }

    if (!trans->cfg->base_params->apmg_wake_up_wa)

@ -650,7 +651,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
        if (txq_id != trans_pcie->cmd_queue) {
            IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
                          q->id);
            iwl_trans_pcie_unref(trans);
            iwl_trans_unref(trans);
        } else {
            iwl_pcie_clear_cmd_in_flight(trans);
        }

@ -1134,7 +1135,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,

        if (q->read_ptr == q->write_ptr) {
            IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
            iwl_trans_pcie_unref(trans);
            iwl_trans_unref(trans);
        }

out:

@ -1153,7 +1154,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
        !trans_pcie->ref_cmd_in_flight) {
        trans_pcie->ref_cmd_in_flight = true;
        IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
        iwl_trans_pcie_ref(trans);
        iwl_trans_ref(trans);
    }

    /*

@ -1799,6 +1800,16 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
    IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
                   iwl_get_cmd_string(trans, cmd->id));

    if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
        ret = wait_event_timeout(trans_pcie->d0i3_waitq,
                                 pm_runtime_active(&trans_pcie->pci_dev->dev),
                                 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
        if (!ret) {
            IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
            return -ETIMEDOUT;
        }
    }

    cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
    if (cmd_idx < 0) {
        ret = cmd_idx;

@ -2362,7 +2373,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
            txq->frozen_expiry_remainder = txq->wd_timeout;
        }
        IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
        iwl_trans_pcie_ref(trans);
        iwl_trans_ref(trans);
    }

    /* Tell device the write index *just past* this latest filled TFD */