Merge tag 'iwlwifi-next-for-kalle-2018-04-20' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

iwlwifi patches for 4.18

* implement Traffic Condition Monitor;
* use TCM for scan and BT coex;
* use TCM to detect when the AP doesn't support UAPSD properly;
* some more work for the 22000 family of devices;
* introduce AMSDU rate control offload;
* a couple of clean-ups and bugfixes.
Kalle Valo, 2018-04-24 13:00:03 +03:00
commit fb7bcb6b46
42 changed files with 1606 additions and 777 deletions


@ -13,7 +13,7 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o
iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o


@ -137,7 +137,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.gen2 = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
.tx_cmd_queue_size = 32, \
.min_umac_error_event_table = 0x400000
const struct iwl_cfg iwl22000_2ac_cfg_hr = {


@ -1200,16 +1200,16 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
return -EINVAL;
}
if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable &&
!data->sku_cap_band_52GHz_enable) {
if (!data->sku_cap_11n_enable && !data->sku_cap_band_24ghz_enable &&
!data->sku_cap_band_52ghz_enable) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
IWL_DEBUG_INFO(priv,
"Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
data->sku_cap_band_24ghz_enable ? "" : "NOT", "enabled",
data->sku_cap_band_52ghz_enable ? "" : "NOT", "enabled",
data->sku_cap_11n_enable ? "" : "NOT", "enabled");
priv->hw_params.tx_chains_num =


@ -87,11 +87,6 @@ enum iwl_data_path_subcmd_ids {
*/
TLC_MNG_CONFIG_CMD = 0xF,
/**
* @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd
*/
TLC_MNG_NOTIF_REQ_CMD = 0x10,
/**
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
*/


@ -189,23 +189,37 @@ struct iwl_nvm_get_info_general {
u8 reserved;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
/**
* enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
* @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled
* @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
* @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
* @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
* @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
* @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
* @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
* @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
* @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled
*/
enum iwl_nvm_mac_sku_flags {
NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0),
NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14),
NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15),
};
/**
* struct iwl_nvm_get_info_sku - mac information
* @enable_24g: band 2.4G enabled
* @enable_5g: band 5G enabled
* @enable_11n: 11n enabled
* @enable_11ac: 11ac enabled
* @mimo_disable: MIMO enabled
* @ext_crypto: Extended crypto enabled
* @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags
*/
struct iwl_nvm_get_info_sku {
__le32 enable_24g;
__le32 enable_5g;
__le32 enable_11n;
__le32 enable_11ac;
__le32 mimo_disable;
__le32 ext_crypto;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */
__le32 mac_sku_flags;
} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */
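The v2 SKU section replaces six separate __le32 booleans with a single versioned flags word. A minimal decode sketch, mirroring the iwl_get_nvm() parsing added later in this merge (rsp and nvm assumed in scope):

	u32 mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);

	nvm->sku_cap_11ac_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_band_24ghz_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_mimo_disabled =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);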
/**
* struct iwl_nvm_get_info_phy - phy information
@ -243,7 +257,7 @@ struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed


@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -64,62 +66,38 @@
/**
* enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
* @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support
* @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD
* @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
* @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
* @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
* @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
*/
enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0),
IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1),
IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2),
IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(3),
IWL_TLC_MNG_CFG_FLAGS_BF_MSK = BIT(4),
IWL_TLC_MNG_CFG_FLAGS_DCM_MSK = BIT(5),
IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1),
};
/**
* enum iwl_tlc_mng_cfg_cw - channel width options
* @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
* @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
*/
enum iwl_tlc_mng_cfg_cw {
IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
IWL_TLC_MNG_CH_WIDTH_20MHZ,
IWL_TLC_MNG_CH_WIDTH_40MHZ,
IWL_TLC_MNG_CH_WIDTH_80MHZ,
IWL_TLC_MNG_CH_WIDTH_160MHZ,
IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
};
/**
* enum iwl_tlc_mng_cfg_chains - possible chains
* @IWL_TLC_MNG_CHAIN_A_MSK: chain A
* @IWL_TLC_MNG_CHAIN_B_MSK: chain B
* @IWL_TLC_MNG_CHAIN_C_MSK: chain C
*/
enum iwl_tlc_mng_cfg_chains {
IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
};
/**
* enum iwl_tlc_mng_cfg_gi - guard interval options
* @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
* @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
* @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
* @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
*/
enum iwl_tlc_mng_cfg_gi {
IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0),
IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1),
IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2),
IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3),
};
/**
@ -145,25 +123,7 @@ enum iwl_tlc_mng_cfg_mode {
};
/**
* enum iwl_tlc_mng_vht_he_types - VHT HE types
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT HT single user
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT HT single user extended
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT HT multiple users
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types
*/
enum iwl_tlc_mng_vht_he_types {
IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM =
IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
};
/**
* enum iwl_tlc_mng_ht_rates - HT/VHT rates
* enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates
* @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
* @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
* @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
@ -174,6 +134,8 @@ enum iwl_tlc_mng_vht_he_types {
* @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
* @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
* @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
* @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10
* @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11
* @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
*/
enum iwl_tlc_mng_ht_rates {
@ -187,81 +149,73 @@ enum iwl_tlc_mng_ht_rates {
IWL_TLC_MNG_HT_RATE_MCS7,
IWL_TLC_MNG_HT_RATE_MCS8,
IWL_TLC_MNG_HT_RATE_MCS9,
IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9,
IWL_TLC_MNG_HT_RATE_MCS10,
IWL_TLC_MNG_HT_RATE_MCS11,
IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11,
};
/* Maximum supported tx antennas number */
#define MAX_RS_ANT_NUM 3
#define MAX_NSS 2
/**
* struct tlc_config_cmd - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_supp_ch_width: channel width
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
* @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported
* @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types
* @non_ht_supp_rates: bitmap of supported legacy rates
* @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
* @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
* @mode: &enum iwl_tlc_mng_cfg_mode
* @reserved2: reserved
* @he_supp_rates: bitmap of supported HE rates
* @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
* @amsdu: TX amsdu is supported
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @non_ht_rates: bitmap of supported legacy rates
* @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
* pair (0 - 80mhz width and below, 1 - 160mhz).
* @max_mpdu_len: max MPDU length, in bytes
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* @he_gi_support: 11ax HE guard interval
* @max_ampdu_cnt: max AMPDU size (frames count)
* use BIT(@enum iwl_tlc_mng_cfg_cw)
* @reserved2: reserved
*/
struct iwl_tlc_config_cmd {
u8 sta_id;
u8 reserved1[3];
u8 max_supp_ch_width;
u8 chains;
u8 max_supp_ss;
u8 valid_vht_he_types;
__le16 flags;
__le16 non_ht_supp_rates;
__le16 ht_supp_rates[MAX_RS_ANT_NUM];
u8 max_ch_width;
u8 mode;
u8 reserved2;
__le16 he_supp_rates;
u8 chains;
u8 amsdu;
__le16 flags;
__le16 non_ht_rates;
__le16 ht_rates[MAX_NSS][2];
__le16 max_mpdu_len;
u8 sgi_ch_width_supp;
u8 he_gi_support;
__le32 max_ampdu_cnt;
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */
#define IWL_TLC_NOTIF_INIT_RATE_POS 0
#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS)
#define IWL_TLC_NOTIF_REQ_INTERVAL (500)
u8 reserved2[1];
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */
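A hedged sketch of filling the v2 command; the per-<nss, channel-width> rate table replaces the old per-antenna ht_supp_rates. The sta_id, mode and MCS masks here are illustrative, not taken from the patch:

	struct iwl_tlc_config_cmd cfg_cmd = {
		.sta_id = sta_id,	/* assumed in scope */
		.max_ch_width = IWL_TLC_MNG_CH_WIDTH_80MHZ,
		.flags = cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK),
		.chains = IWL_TLC_MNG_CHAIN_A_MSK | IWL_TLC_MNG_CHAIN_B_MSK,
		.amsdu = 1,
		.mode = IWL_TLC_MNG_MODE_VHT,	/* from enum iwl_tlc_mng_cfg_mode */
	};

	/* NSS 1, 80MHz and below: MCS 0-9 */
	cfg_cmd.ht_rates[0][0] =
		cpu_to_le16(GENMASK(IWL_TLC_MNG_HT_RATE_MCS9,
				    IWL_TLC_MNG_HT_RATE_MCS0));
	/* NSS 1, 160MHz: no rates in this example */
	cfg_cmd.ht_rates[0][1] = 0;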
/**
* struct iwl_tlc_notif_req_config_cmd - request notif on specific changes
* @sta_id: relevant station
* @reserved1: reserved
* @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\*
* @interval: minimum time between notifications from TLC to the driver (msec)
* @reserved2: reserved
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
 * @IWL_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update
*/
struct iwl_tlc_notif_req_config_cmd {
u8 sta_id;
u8 reserved1;
__le16 flags;
__le16 interval;
__le16 reserved2;
} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */
enum iwl_tlc_update_flags {
IWL_TLC_NOTIF_FLAG_RATE = BIT(0),
IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1),
};
/**
* struct iwl_tlc_update_notif - TLC notification from FW
* @sta_id: station id
* @reserved: reserved
* @flags: bitmap of notifications reported
* @values: field per flag in struct iwl_tlc_notif_req_config_cmd
* @rate: current initial rate
* @amsdu_size: Max AMSDU size, in bytes
* @amsdu_enabled: bitmap for per-TID AMSDU enablement
*/
struct iwl_tlc_update_notif {
u8 sta_id;
u8 reserved;
__le16 flags;
__le32 values[16];
} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */
u8 reserved[3];
__le32 flags;
__le32 rate;
__le32 amsdu_size;
__le32 amsdu_enabled;
} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
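On the driver side a consumer is expected to touch only the fields whose bit is set in @flags. A sketch (rx packet unpacking and station lookup elided; notif points at the payload, lq_sta and update_amsdu() are hypothetical):

	u32 flags = le32_to_cpu(notif->flags);

	if (flags & IWL_TLC_NOTIF_FLAG_RATE)
		lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
	if (flags & IWL_TLC_NOTIF_FLAG_AMSDU)
		/* amsdu_enabled is a per-TID bitmap, amsdu_size is in bytes */
		update_amsdu(le32_to_cpu(notif->amsdu_enabled),
			     le32_to_cpu(notif->amsdu_size));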
/**
* enum iwl_tlc_debug_flags - debug options


@ -131,6 +131,8 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id


@ -227,4 +227,40 @@ static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
cancel_delayed_work_sync(&fwrt->dump.wk);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
{
fwrt->timestamp.delay = 0;
cancel_delayed_work_sync(&fwrt->timestamp.wk);
}
void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay);
static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
{
cancel_delayed_work_sync(&fwrt->timestamp.wk);
}
static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
{
if (!fwrt->timestamp.delay)
return;
schedule_delayed_work(&fwrt->timestamp.wk,
round_jiffies_relative(fwrt->timestamp.delay));
}
#else
static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt,
u32 delay) {}
static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#endif /* __iwl_fw_dbg_h__ */
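With the timestamp helpers now living here instead of fw/debugfs.h, code outside debugfs can pause the periodic timestamp marker across power transitions. A usage sketch of the expected pairing (fwrt assumed in scope):

	/* entering suspend: stop the delayed work, keep the configured delay */
	iwl_fw_suspend_timestamp(fwrt);
	/* ... device is suspended ... */
	/* on resume: re-arm only if a delay was configured */
	iwl_fw_resume_timestamp(fwrt);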


@ -64,6 +64,7 @@
*****************************************************************************/
#include "api/commands.h"
#include "debugfs.h"
#include "dbg.h"
#define FWRT_DEBUGFS_READ_FILE_OPS(name) \
static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \


@ -69,28 +69,6 @@
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir);
static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
{
fwrt->timestamp.delay = 0;
cancel_delayed_work_sync(&fwrt->timestamp.wk);
}
static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
{
cancel_delayed_work_sync(&fwrt->timestamp.wk);
}
static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
{
if (!fwrt->timestamp.delay)
return;
schedule_delayed_work(&fwrt->timestamp.wk,
round_jiffies_relative(fwrt->timestamp.delay));
}
void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay);
#else
static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
@ -98,13 +76,4 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
return 0;
}
static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt,
u32 delay) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */


@ -1,162 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "iwl-drv.h"
#include "runtime.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/commands.h"
#include "iwl-nvm-parse.h"
struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt)
{
struct iwl_nvm_get_info cmd = {};
struct iwl_nvm_get_info_rsp *rsp;
struct iwl_trans *trans = fwrt->trans;
struct iwl_nvm_data *nvm;
struct iwl_host_cmd hcmd = {
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &cmd, },
.len = { sizeof(cmd) },
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
};
int ret;
bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
return ERR_PTR(ret);
if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
"Invalid payload len in NVM response from FW %d",
iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
ret = -EINVAL;
goto out;
}
rsp = (void *)hcmd.resp_pkt->data;
if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
IWL_INFO(fwrt, "OTP is empty\n");
nvm = kzalloc(sizeof(*nvm) +
sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
GFP_KERNEL);
if (!nvm) {
ret = -ENOMEM;
goto out;
}
iwl_set_hw_address_from_csr(trans, nvm);
/* TODO: if platform NVM has MAC address - override it here */
if (!is_valid_ether_addr(nvm->hw_addr)) {
IWL_ERR(fwrt, "no valid mac address was found\n");
ret = -EINVAL;
goto err_free;
}
IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
/* Initialize general data */
nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
/* Initialize MAC sku data */
nvm->sku_cap_11ac_enable =
le32_to_cpu(rsp->mac_sku.enable_11ac);
nvm->sku_cap_11n_enable =
le32_to_cpu(rsp->mac_sku.enable_11n);
nvm->sku_cap_band_24GHz_enable =
le32_to_cpu(rsp->mac_sku.enable_24g);
nvm->sku_cap_band_52GHz_enable =
le32_to_cpu(rsp->mac_sku.enable_5g);
nvm->sku_cap_mimo_disabled =
le32_to_cpu(rsp->mac_sku.mimo_disable);
/* Initialize PHY sku data */
nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
/* Initialize regulatory data */
nvm->lar_enabled =
le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported;
iwl_init_sbands(trans->dev, trans->cfg, nvm,
rsp->regulatory.channel_profile,
nvm->valid_tx_ant & fwrt->fw->valid_tx_ant,
nvm->valid_rx_ant & fwrt->fw->valid_rx_ant,
nvm->lar_enabled, false);
iwl_free_resp(&hcmd);
return nvm;
err_free:
kfree(nvm);
out:
iwl_free_resp(&hcmd);
return ERR_PTR(ret);
}
IWL_EXPORT_SYMBOL(iwl_fw_get_nvm);


@ -170,6 +170,5 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
struct iwl_rx_cmd_buffer *rxb);
struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_runtime_h__ */


@ -151,6 +151,8 @@ enum iwl_nvm_type {
#define ANT_AC (ANT_A | ANT_C)
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
#define MAX_ANT_NUM 3
static inline u8 num_of_ant(u8 mask)
{
@ -333,8 +335,6 @@ struct iwl_pwr_tx_backoff {
* @gen2: 22000 and on transport operation
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
* @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
* the regular queues
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@ -386,7 +386,6 @@ struct iwl_cfg {
gen2:1,
cdb:1,
dbgc_supported:1;
u16 tx_cmd_queue_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;


@ -899,8 +899,8 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
EEPROM_SKU_CAP);
data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;


@ -81,10 +81,11 @@ struct iwl_nvm_data {
__le16 kelvin_voltage;
__le16 xtal_calib[2];
bool sku_cap_band_24GHz_enable;
bool sku_cap_band_52GHz_enable;
bool sku_cap_band_24ghz_enable;
bool sku_cap_band_52ghz_enable;
bool sku_cap_11n_enable;
bool sku_cap_11ac_enable;
bool sku_cap_11ax_enable;
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;


@ -8,6 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -35,6 +36,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -68,6 +70,7 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
@ -76,6 +79,10 @@
#include "iwl-io.h"
#include "iwl-csr.h"
#include "fw/acpi.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"
/* NVM offsets (in words) definitions */
enum nvm_offsets {
@ -146,8 +153,8 @@ static const u8 iwl_ext_nvm_channels[] = {
149, 153, 157, 161, 165, 169, 173, 177, 181
};
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
#define NUM_2GHZ_CHANNELS 14
#define NUM_2GHZ_CHANNELS_EXT 14
#define FIRST_2GHZ_HT_MINUS 5
@ -291,7 +298,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 * const nvm_ch_flags,
bool lar_supported, bool no_wide_in_5ghz)
u32 sbands_flags)
{
int ch_idx;
int n_channels = 0;
@ -301,11 +308,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
const u8 *nvm_chan;
if (cfg->nvm_type != IWL_NVM_EXT) {
num_of_ch = IWL_NUM_CHANNELS;
num_of_ch = IWL_NVM_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
num_of_ch = IWL_NUM_CHANNELS_EXT;
num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
nvm_chan = &iwl_ext_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
}
@ -315,11 +322,12 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
if (is_5ghz && !data->sku_cap_band_52GHz_enable)
if (is_5ghz && !data->sku_cap_band_52ghz_enable)
continue;
/* workaround to disable wide channels in 5GHz */
if (no_wide_in_5ghz && is_5ghz) {
if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
is_5ghz) {
ch_flags &= ~(NVM_CHANNEL_40MHZ |
NVM_CHANNEL_80MHZ |
NVM_CHANNEL_160MHZ);
@ -328,7 +336,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (ch_flags & NVM_CHANNEL_160MHZ)
data->vht160_supported = true;
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) &&
!(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
* supported, hence we still want to add them to
@ -358,7 +367,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
/* don't put limitations in case we're using LAR */
if (!lar_supported)
if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
ch_idx, is_5ghz,
ch_flags, cfg);
@ -454,17 +463,17 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
u8 tx_chains, u8 rx_chains, bool lar_supported,
bool no_wide_in_5ghz)
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_ch_flags, u8 tx_chains,
u8 rx_chains, u32 sbands_flags)
{
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
lar_supported, no_wide_in_5ghz);
sbands_flags);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@ -490,7 +499,6 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
}
IWL_EXPORT_SYMBOL(iwl_init_sbands);
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
@ -568,8 +576,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
dest[5] = hw_addr[0];
}
void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data)
static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data)
{
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
@ -587,7 +595,6 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr);
static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
@ -712,20 +719,20 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct device *dev = trans->dev;
struct iwl_nvm_data *data;
bool lar_enabled;
bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
u32 sku, radio_cfg;
u32 sbands_flags = 0;
u16 lar_config;
const __le16 *ch_section;
if (cfg->nvm_type != IWL_NVM_EXT)
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
IWL_NUM_CHANNELS,
IWL_NVM_NUM_CHANNELS,
GFP_KERNEL);
else
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
IWL_NUM_CHANNELS_EXT,
IWL_NVM_NUM_CHANNELS_EXT,
GFP_KERNEL);
if (!data)
return NULL;
@ -740,8 +747,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
rx_chains &= data->valid_rx_ant;
sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;
@ -786,8 +793,14 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
}
if (lar_fw_supported && lar_enabled)
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
lar_fw_supported && lar_enabled, no_wide_in_5ghz);
sbands_flags);
data->calib_version = 255;
return data;
@ -859,7 +872,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int valid_rules = 0;
bool new_rule;
int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
@ -938,3 +951,294 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
return regd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc
#define MAX_NVM_FILE_LEN 16384
void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
unsigned int len)
{
#define IWL_4165_DEVICE_ID 0x5501
#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
if (section == NVM_SECTION_TYPE_PHY_SKU &&
hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
(data[4] & NVM_SKU_CAP_MIMO_DISABLE))
/* OTP 0x52 bug workaround: it's a 1x1 device */
data[3] = ANT_B | (ANT_B << 4);
}
IWL_EXPORT_SYMBOL(iwl_nvm_fixups);
/*
* Reads external NVM from a file into mvm->nvm_sections
*
* HOW TO CREATE THE NVM FILE FORMAT:
* ------------------------------
* 1. create hex file, format:
* 3800 -> header
* 0000 -> header
* 5a40 -> data
*
* rev - 6 bit (word1)
* len - 10 bit (word1)
* id - 4 bit (word2)
* rsv - 12 bit (word2)
*
* 2. flip 8bits with 8 bits per line to get the right NVM file format
*
* 3. create binary file from the hex file
*
* 4. save as "iNVM_xxx.bin" under /lib/firmware
*/
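For reference, a hedged decode of one section header in both layouts, using the helper macros defined below (w1/w2 stand for the two header words after le16_to_cpu()):

	/* legacy NVM: word1 = rev(6) | len(10), word2 = id(4) | rsv(12) */
	section_size = 2 * NVM_WORD1_LEN(w1);
	section_id = NVM_WORD2_ID(w2);

	/* extended NVM: id in word1, byte-swapped length in word2 */
	section_size = 2 * EXT_NVM_WORD2_LEN(w2);
	section_id = EXT_NVM_WORD1_ID(w1);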
int iwl_read_external_nvm(struct iwl_trans *trans,
const char *nvm_file_name,
struct iwl_nvm_section *nvm_sections)
{
int ret, section_size;
u16 section_id;
const struct firmware *fw_entry;
const struct {
__le16 word1;
__le16 word2;
u8 data[];
} *file_sec;
const u8 *eof;
u8 *temp;
int max_section_size;
const __le32 *dword_buff;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
#define NVM_HEADER_0 (0x2A504C54)
#define NVM_HEADER_1 (0x4E564D2A)
#define NVM_HEADER_SIZE (4 * sizeof(u32))
IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");
/* Maximal size depends on NVM version */
if (trans->cfg->nvm_type != IWL_NVM_EXT)
max_section_size = IWL_MAX_NVM_SECTION_SIZE;
else
max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
/*
* Obtain NVM image via request_firmware. Since we already used
* request_firmware_nowait() for the firmware binary load and only
* get here after that we assume the NVM request can be satisfied
* synchronously.
*/
ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
if (ret) {
IWL_ERR(trans, "ERROR: %s isn't available %d\n",
nvm_file_name, ret);
return ret;
}
IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n",
nvm_file_name, fw_entry->size);
if (fw_entry->size > MAX_NVM_FILE_LEN) {
IWL_ERR(trans, "NVM file too large\n");
ret = -EINVAL;
goto out;
}
eof = fw_entry->data + fw_entry->size;
dword_buff = (__le32 *)fw_entry->data;
/* some NVM files will contain a header.
* The header is identified by the following two dwords:
* dword[0] = 0x2A504C54
* dword[1] = 0x4E564D2A
*
* This header must be skipped when providing the NVM data to the FW.
*/
if (fw_entry->size > NVM_HEADER_SIZE &&
dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
IWL_INFO(trans, "NVM Manufacturing date %08X\n",
le32_to_cpu(dword_buff[3]));
/* nvm file validation, dword_buff[2] holds the file version */
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
le32_to_cpu(dword_buff[2]) < 0xE4A) {
ret = -EFAULT;
goto out;
}
} else {
file_sec = (void *)fw_entry->data;
}
while (true) {
if (file_sec->data > eof) {
IWL_ERR(trans,
"ERROR - NVM file too short for section header\n");
ret = -EINVAL;
break;
}
/* check for EOF marker */
if (!file_sec->word1 && !file_sec->word2) {
ret = 0;
break;
}
if (trans->cfg->nvm_type != IWL_NVM_EXT) {
section_size =
2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
} else {
section_size = 2 * EXT_NVM_WORD2_LEN(
le16_to_cpu(file_sec->word2));
section_id = EXT_NVM_WORD1_ID(
le16_to_cpu(file_sec->word1));
}
if (section_size > max_section_size) {
IWL_ERR(trans, "ERROR - section too large (%d)\n",
section_size);
ret = -EINVAL;
break;
}
if (!section_size) {
IWL_ERR(trans, "ERROR - section empty\n");
ret = -EINVAL;
break;
}
if (file_sec->data + section_size > eof) {
IWL_ERR(trans,
"ERROR - NVM file too short for section (%d bytes)\n",
section_size);
ret = -EINVAL;
break;
}
if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
"Invalid NVM section ID %d\n", section_id)) {
ret = -EINVAL;
break;
}
temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
if (!temp) {
ret = -ENOMEM;
break;
}
iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
kfree(nvm_sections[section_id].data);
nvm_sections[section_id].data = temp;
nvm_sections[section_id].length = section_size;
/* advance to the next section */
file_sec = (void *)(file_sec->data + section_size);
}
out:
release_firmware(fw_entry);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
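A caller sketch, matching the fw.c hunk later in this merge, where the mvm op mode loads an override file named by its nvm_file module parameter:

	if (mvm->nvm_file_name) {
		iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
				      mvm->nvm_sections);
		iwl_mvm_load_nvm_to_nic(mvm);
	}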
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw)
{
struct iwl_nvm_get_info cmd = {};
struct iwl_nvm_get_info_rsp *rsp;
struct iwl_nvm_data *nvm;
struct iwl_host_cmd hcmd = {
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &cmd, },
.len = { sizeof(cmd) },
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
};
int ret;
bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
fw_has_capa(&fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
u32 mac_flags;
u32 sbands_flags = 0;
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
return ERR_PTR(ret);
if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
"Invalid payload len in NVM response from FW %d",
iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
ret = -EINVAL;
goto out;
}
rsp = (void *)hcmd.resp_pkt->data;
if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
IWL_INFO(trans, "OTP is empty\n");
nvm = kzalloc(sizeof(*nvm) +
sizeof(struct ieee80211_channel) * IWL_NVM_NUM_CHANNELS,
GFP_KERNEL);
if (!nvm) {
ret = -ENOMEM;
goto out;
}
iwl_set_hw_address_from_csr(trans, nvm);
/* TODO: if platform NVM has MAC address - override it here */
if (!is_valid_ether_addr(nvm->hw_addr)) {
IWL_ERR(trans, "no valid mac address was found\n");
ret = -EINVAL;
goto err_free;
}
IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
/* Initialize general data */
nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
/* Initialize MAC sku data */
mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
nvm->sku_cap_11ac_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
nvm->sku_cap_11n_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
nvm->sku_cap_band_24ghz_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
nvm->sku_cap_band_52ghz_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
/* Initialize PHY sku data */
nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
nvm->lar_enabled = true;
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
}
iwl_init_sbands(trans->dev, trans->cfg, nvm,
rsp->regulatory.channel_profile,
nvm->valid_tx_ant & fw->valid_tx_ant,
nvm->valid_rx_ant & fw->valid_rx_ant,
sbands_flags);
iwl_free_resp(&hcmd);
return nvm;
err_free:
kfree(nvm);
out:
iwl_free_resp(&hcmd);
return ERR_PTR(ret);
}
IWL_EXPORT_SYMBOL(iwl_get_nvm);


@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -67,6 +69,17 @@
#include <net/cfg80211.h>
#include "iwl-eeprom-parse.h"
/**
* enum iwl_nvm_sbands_flags - modification flags for the channel profiles
*
* @IWL_NVM_SBANDS_FLAGS_LAR: LAR is enabled
* @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz
*/
enum iwl_nvm_sbands_flags {
IWL_NVM_SBANDS_FLAGS_LAR = BIT(0),
IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
};
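Callers compose this mask once and pass it down to the (now static) iwl_init_sbands(); as done by iwl_parse_nvm_data() in this merge:

	u32 sbands_flags = 0;

	if (lar_fw_supported && lar_enabled)
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
	if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;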
/**
* iwl_parse_nvm_data - parse NVM data and return values
*
@ -82,20 +95,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const __le16 *mac_override, const __le16 *phy_sku,
u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
/**
* iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on
*/
void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data);
/**
* iwl_init_sbands - parse and set all channel profiles
*/
void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
u8 tx_chains, u8 rx_chains, bool lar_supported,
bool no_wide_in_5ghz);
/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
*
@ -109,4 +108,33 @@ struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
*
* This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
* and saved for later use by the driver. Not all NVM sections are saved
* this way, only the needed ones.
*/
struct iwl_nvm_section {
u16 length;
const u8 *data;
};
/**
* iwl_read_external_nvm - Reads external NVM from a file into nvm_sections
*/
int iwl_read_external_nvm(struct iwl_trans *trans,
const char *nvm_file_name,
struct iwl_nvm_section *nvm_sections);
void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
unsigned int len);
/**
* iwl_get_nvm - retrieve NVM data from firmware
*
* Allocates a new iwl_nvm_data structure, fills it with
* NVM data, and returns it to caller.
*/
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw);
#endif /* __iwl_nvm_parse_h__ */


@ -554,7 +554,7 @@ struct iwl_trans_ops {
/* 22000 functions */
int (*txq_alloc)(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
int cmd_id,
int cmd_id, int size,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
@ -952,8 +952,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
int cmd_id,
unsigned int queue_wdg_timeout)
int cmd_id, int size,
unsigned int wdg_timeout)
{
might_sleep();
@ -965,7 +965,7 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
return -EIO;
}
return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout);
}
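Together with IWL_DEFAULT_QUEUE_SIZE and IWL_MGMT_QUEUE_SIZE from the txq API header, the new size argument lets callers allocate shallow management queues. A hedged caller sketch (cmd, wdg_timeout and the SCD_QUEUE_CFG command id as used by the mvm op mode, all assumed in scope):

	queue = iwl_trans_txq_alloc(mvm->trans, &cmd, SCD_QUEUE_CFG,
				    IWL_DEFAULT_QUEUE_SIZE, wdg_timeout);
	mgmt_queue = iwl_trans_txq_alloc(mvm->trans, &cmd, SCD_QUEUE_CFG,
					 IWL_MGMT_QUEUE_SIZE, wdg_timeout);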
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,


@ -279,6 +279,8 @@ struct iwl_bt_iterator_data {
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
bool primary_ll;
u8 primary_load;
u8 secondary_load;
};
static inline
@ -295,6 +297,30 @@ void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
}
#define MVM_COEX_TCM_PERIOD (HZ * 10)
static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
struct iwl_bt_iterator_data *data)
{
unsigned long now = jiffies;
if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD))
return;
mvm->bt_coex_last_tcm_ts = now;
/* We assume here that we don't have more than 2 vifs on 2.4GHz */
/* if the primary is low latency, it will stay primary */
if (data->primary_ll)
return;
if (data->primary_load >= data->secondary_load)
return;
swap(data->primary, data->secondary);
}
/* must be called under rcu_read_lock */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
@ -385,6 +411,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* there is low latency vif - we will be secondary */
data->secondary = chanctx_conf;
}
if (data->primary == chanctx_conf)
data->primary_load = mvm->tcm.result.load[mvmvif->id];
else if (data->secondary == chanctx_conf)
data->secondary_load = mvm->tcm.result.load[mvmvif->id];
return;
}
@ -398,6 +429,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
if (data->primary == chanctx_conf)
data->primary_load = mvm->tcm.result.load[mvmvif->id];
else if (data->secondary == chanctx_conf)
data->secondary_load = mvm->tcm.result.load[mvmvif->id];
/*
* don't reduce the Tx power if one of these is true:
* we are in LOOSE
@ -449,6 +484,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
if (data.primary) {
struct ieee80211_chanctx_conf *chan = data.primary;
if (WARN_ON(!chan->def.chan)) {


@ -69,6 +69,8 @@
#include <linux/ieee80211.h>
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
@ -112,6 +114,11 @@
#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_ADWELL_ENABLE 1
#define IWL_MVM_ADWELL_MAX_BUDGET 0
#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
#define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */
#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */
#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */
#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1


@ -1097,6 +1097,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* make sure the d0i3 exit work is not pending */
flush_work(&mvm->d0i3_exit_work);
iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
@ -2014,6 +2015,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mvm_resume_tcm(mvm);
iwl_fw_runtime_resume(&mvm->fwrt);
return ret;
@ -2042,6 +2045,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
/* start pseudo D3 */
@ -2104,6 +2109,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
iwl_mvm_resume_tcm(mvm);
iwl_fw_runtime_resume(&mvm->fwrt);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;


@ -269,6 +269,8 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
vif->bss_conf.bssid);
pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n",
mvm->tcm.result.load[mvmvif->id]);
pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
pos += scnprintf(buf+pos, bufsz-pos,


@ -1728,6 +1728,27 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
static ssize_t
iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
unsigned int pos = 0;
size_t bufsz = sizeof(buf);
int i;
mutex_lock(&mvm->mutex);
for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++)
pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
mvm->uapsd_noagg_bssids[i].addr);
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
@ -1762,6 +1783,8 @@ MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
@ -1972,6 +1995,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
goto err;
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
bcast_dir = debugfs_create_dir("bcast_filtering",


@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -79,6 +81,8 @@
#include "mvm.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
#define MVM_UCODE_ALIVE_TIMEOUT HZ
#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
@ -381,7 +385,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
/* Load NVM to NIC if needed */
if (mvm->nvm_file_name) {
iwl_mvm_read_external_nvm(mvm);
iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
mvm->nvm_sections);
iwl_mvm_load_nvm_to_nic(mvm);
}
@ -410,7 +415,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
/* Read the NVM only at driver load time, no need to do this twice */
if (!IWL_MVM_PARSE_NVM && read_nvm) {
mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt);
mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
if (IS_ERR(mvm->nvm_data)) {
ret = PTR_ERR(mvm->nvm_data);
mvm->nvm_data = NULL;
@ -1093,6 +1098,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
ret = iwl_mvm_config_scan(mvm);
if (ret)
goto error;


@ -952,6 +952,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
iwl_mvm_sta_from_mac80211(sta)->sta_id) {
struct iwl_mvm_vif *mvmvif;
u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
mdata->opened_rx_ba_sessions = true;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
}
if (!iwl_enable_rx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
@ -1435,6 +1445,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = vif;
}
iwl_mvm_tcm_add_vif(mvm, vif);
if (vif->type == NL80211_IFTYPE_MONITOR)
mvm->monitor_on = true;
@ -1486,6 +1498,10 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_prepare_mac_removal(mvm, vif);
if (!(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC))
iwl_mvm_tcm_rm_vif(mvm, vif);
mutex_lock(&mvm->mutex);
if (mvm->bf_allowed_vif == mvmvif) {
@ -2535,6 +2551,16 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const u8 *bssid)
{
int i;
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_tcm_mac *mdata;
mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
mdata->opened_rx_ba_sessions = false;
}
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
return;
@ -2549,6 +2575,13 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return;
}
for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
return;
}
}
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
}


@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -90,7 +92,9 @@
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
#include "fw/debugfs.h"
#include "iwl-nvm-parse.h"
#include <linux/average.h>
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
@ -444,6 +448,8 @@ struct iwl_mvm_vif {
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
struct delayed_work uapsd_nonagg_detected_wk;
/* Indicates that CSA countdown may be started */
bool csa_countdown;
bool csa_failed;
@ -502,18 +508,6 @@ enum iwl_mvm_sched_scan_pass_all_states {
SCHED_SCAN_PASS_ALL_FOUND,
};
/**
* struct iwl_nvm_section - describes an NVM section in memory.
*
* This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
* and saved for later use by the driver. Not all NVM sections are saved
* this way, only the needed ones.
*/
struct iwl_nvm_section {
u16 length;
const u8 *data;
};
/**
* struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
@ -595,6 +589,53 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE,
};
enum iwl_mvm_traffic_load {
IWL_MVM_TRAFFIC_LOW,
IWL_MVM_TRAFFIC_MEDIUM,
IWL_MVM_TRAFFIC_HIGH,
};
DECLARE_EWMA(rate, 16, 16)
struct iwl_mvm_tcm_mac {
struct {
u32 pkts[IEEE80211_NUM_ACS];
u32 airtime;
} tx;
struct {
u32 pkts[IEEE80211_NUM_ACS];
u32 airtime;
u32 last_ampdu_ref;
} rx;
struct {
/* track AP's transfer in client mode */
u64 rx_bytes;
struct ewma_rate rate;
bool detected;
} uapsd_nonagg_detect;
bool opened_rx_ba_sessions;
};
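DECLARE_EWMA(rate, 16, 16) above generates ewma_rate_init/_add/_read (see linux/average.h). A sketch of how the U-APSD non-aggregation detector is meant to feed it (rate_kbps and thresh_kbps are illustrative, assumed computed per sampling period):

	ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, rate_kbps);

	if (!mdata->opened_rx_ba_sessions &&
	    ewma_rate_read(&mdata->uapsd_nonagg_detect.rate) > thresh_kbps)
		mdata->uapsd_nonagg_detect.detected = true;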
struct iwl_mvm_tcm {
struct delayed_work work;
spinlock_t lock; /* used when time elapsed */
unsigned long ts; /* timestamp when period ends */
unsigned long ll_ts;
unsigned long uapsd_nonagg_ts;
bool paused;
struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
struct {
u32 elapsed; /* milliseconds for this TCM period */
u32 airtime[NUM_MAC_INDEX_DRIVER];
enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS];
enum iwl_mvm_traffic_load global_load;
bool low_latency[NUM_MAC_INDEX_DRIVER];
bool change[NUM_MAC_INDEX_DRIVER];
bool global_change;
} result;
};
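The result.load buckets follow from the airtime percentage per TCM period; a sketch using the helper and thresholds declared in this merge (iwl_mvm_tcm_load_percentage() and the IWL_MVM_TCM_LOAD_*_THRESH constants):

	static enum iwl_mvm_traffic_load
	tcm_load(u32 airtime, u32 elapsed)
	{
		u8 pct = iwl_mvm_tcm_load_percentage(airtime, elapsed);

		if (pct >= IWL_MVM_TCM_LOAD_HIGH_THRESH)
			return IWL_MVM_TRAFFIC_HIGH;
		if (pct >= IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
			return IWL_MVM_TRAFFIC_MEDIUM;
		return IWL_MVM_TRAFFIC_LOW;
	}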
/**
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
@ -829,7 +870,10 @@ struct iwl_mvm {
unsigned int scan_status;
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* for CDB this is the low-band scan type; for non-CDB it is the only scan type */
enum iwl_mvm_scan_type scan_type;
enum iwl_mvm_scan_type hb_scan_type;
enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
struct delayed_work scan_timeout_dwork;
@ -978,6 +1022,13 @@ struct iwl_mvm {
*/
bool temperature_test; /* Debug test temperature is enabled */
unsigned long bt_coex_last_tcm_ts;
struct iwl_mvm_tcm tcm;
u8 uapsd_noagg_bssid_write_idx;
struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM]
__aligned(2);
struct iwl_time_quota_cmd last_quota_cmd;
#ifdef CONFIG_NL80211_TESTMODE
@ -1293,6 +1344,16 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
}
static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
{
/*
* TODO: should this be the same as iwl_mvm_is_cdb_supported()?
* but then there's a little bit of code in scan that won't make
* any sense...
*/
return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}
static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
@ -1438,7 +1499,6 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm);
static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
{
@ -1771,6 +1831,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_low_latency_cause cause);
/* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
{
@ -1906,6 +1968,17 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
#define MVM_TCM_PERIOD_MSEC 500
#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
#define MVM_LL_PERIOD (10 * HZ)
void iwl_mvm_tcm_work(struct work_struct *work);
void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -76,9 +78,7 @@
#include "fw/acpi.h"
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@ -229,19 +229,6 @@ static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
return 0;
}
static void iwl_mvm_nvm_fixups(struct iwl_mvm *mvm, unsigned int section,
u8 *data, unsigned int len)
{
#define IWL_4165_DEVICE_ID 0x5501
#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
if (section == NVM_SECTION_TYPE_PHY_SKU &&
mvm->trans->hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
(data[4] & NVM_SKU_CAP_MIMO_DISABLE))
/* OTP 0x52 bug workaround: it's a 1x1 device */
data[3] = ANT_B | (ANT_B << 4);
}
/*
* Reads an NVM section completely.
* NICs prior to the 7000 family don't have a real NVM, but just read
@ -282,7 +269,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
offset += ret;
}
iwl_mvm_nvm_fixups(mvm, section, data, offset);
iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM section %d read completed\n", section);
@ -355,184 +342,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
lar_enabled);
}
#define MAX_NVM_FILE_LEN 16384
/*
* Reads external NVM from a file into mvm->nvm_sections
*
* HOW TO CREATE THE NVM FILE FORMAT:
* ------------------------------
* 1. create hex file, format:
* 3800 -> header
* 0000 -> header
* 5a40 -> data
*
* rev - 6 bit (word1)
* len - 10 bit (word1)
* id - 4 bit (word2)
* rsv - 12 bit (word2)
*
* 2. flip 8 bits with 8 bits per line (i.e. byte-swap each 16-bit word) to get the right NVM file format
*
* 3. create binary file from the hex file
*
* 4. save as "iNVM_xxx.bin" under /lib/firmware
*/
int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
int ret, section_size;
u16 section_id;
const struct firmware *fw_entry;
const struct {
__le16 word1;
__le16 word2;
u8 data[];
} *file_sec;
const u8 *eof;
u8 *temp;
int max_section_size;
const __le32 *dword_buff;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
#define NVM_HEADER_0 (0x2A504C54)
#define NVM_HEADER_1 (0x4E564D2A)
#define NVM_HEADER_SIZE (4 * sizeof(u32))
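/*
 * Worked example (illustrative) of the macros above: in the extended
 * NVM layout the 16-bit length is stored byte-swapped, so for
 * word2 = 0x3412, EXT_NVM_WORD2_LEN gives 2 * 0x1234 = 0x2468 bytes;
 * in the legacy layout the section ID sits in the top 4 bits of word2,
 * so NVM_WORD2_ID(0x5000) = 5.
 */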
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
/* Maximum size depends on the NVM version */
if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
max_section_size = IWL_MAX_NVM_SECTION_SIZE;
else
max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
/*
* Obtain NVM image via request_firmware. Since we already used
* request_firmware_nowait() for the firmware binary load and only
* get here after that we assume the NVM request can be satisfied
* synchronously.
*/
ret = request_firmware(&fw_entry, mvm->nvm_file_name,
mvm->trans->dev);
if (ret) {
IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
mvm->nvm_file_name, ret);
return ret;
}
IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
mvm->nvm_file_name, fw_entry->size);
if (fw_entry->size > MAX_NVM_FILE_LEN) {
IWL_ERR(mvm, "NVM file too large\n");
ret = -EINVAL;
goto out;
}
eof = fw_entry->data + fw_entry->size;
dword_buff = (__le32 *)fw_entry->data;
/* Some NVM files contain a header.
* The header is identified by two dwords, as follows:
* dword[0] = 0x2A504C54
* dword[1] = 0x4E564D2A
*
* This header must be skipped when providing the NVM data to the FW.
*/
if (fw_entry->size > NVM_HEADER_SIZE &&
dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
le32_to_cpu(dword_buff[3]));
/* nvm file validation, dword_buff[2] holds the file version */
if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
le32_to_cpu(dword_buff[2]) < 0xE4A) {
ret = -EFAULT;
goto out;
}
} else {
file_sec = (void *)fw_entry->data;
}
while (true) {
if (file_sec->data > eof) {
IWL_ERR(mvm,
"ERROR - NVM file too short for section header\n");
ret = -EINVAL;
break;
}
/* check for EOF marker */
if (!file_sec->word1 && !file_sec->word2) {
ret = 0;
break;
}
if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
section_size =
2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
} else {
section_size = 2 * EXT_NVM_WORD2_LEN(
le16_to_cpu(file_sec->word2));
section_id = EXT_NVM_WORD1_ID(
le16_to_cpu(file_sec->word1));
}
if (section_size > max_section_size) {
IWL_ERR(mvm, "ERROR - section too large (%d)\n",
section_size);
ret = -EINVAL;
break;
}
if (!section_size) {
IWL_ERR(mvm, "ERROR - section empty\n");
ret = -EINVAL;
break;
}
if (file_sec->data + section_size > eof) {
IWL_ERR(mvm,
"ERROR - NVM file too short for section (%d bytes)\n",
section_size);
ret = -EINVAL;
break;
}
if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
"Invalid NVM section ID %d\n", section_id)) {
ret = -EINVAL;
break;
}
temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
if (!temp) {
ret = -ENOMEM;
break;
}
iwl_mvm_nvm_fixups(mvm, section_id, temp, section_size);
kfree(mvm->nvm_sections[section_id].data);
mvm->nvm_sections[section_id].data = temp;
mvm->nvm_sections[section_id].length = section_size;
/* advance to the next section */
file_sec = (void *)(file_sec->data + section_size);
}
out:
release_firmware(fw_entry);
return ret;
}
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
@ -585,7 +394,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
break;
}
iwl_mvm_nvm_fixups(mvm, section, temp, ret);
iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
mvm->nvm_sections[section].data = temp;
mvm->nvm_sections[section].length = ret;
@ -624,14 +433,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* Only if PNVM selected in the mod param - load external NVM */
if (mvm->nvm_file_name) {
/* read External NVM file from the mod param */
ret = iwl_mvm_read_external_nvm(mvm);
ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
mvm->nvm_sections);
if (ret) {
mvm->nvm_file_name = nvm_file_C;
if ((ret == -EFAULT || ret == -ENOENT) &&
mvm->nvm_file_name) {
/* in case reading the NVM file failed, try again */
ret = iwl_mvm_read_external_nvm(mvm);
ret = iwl_read_external_nvm(mvm->trans,
mvm->nvm_file_name,
mvm->nvm_sections);
if (ret)
return ret;
} else {

@ -250,6 +250,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
@ -667,6 +670,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
spin_lock_init(&mvm->tcm.lock);
INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
mvm->tcm.ts = jiffies;
mvm->tcm.ll_ts = jiffies;
mvm->tcm.uapsd_nonagg_ts = jiffies;
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
/*
@ -859,6 +868,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
cancel_delayed_work_sync(&mvm->tcm.work);
iwl_mvm_tof_clean(mvm);
mutex_destroy(&mvm->mutex);
@ -1026,8 +1037,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
iwl_mvm_tlc_update_notif(mvm, pkt);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
@ -1432,6 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = false;
}
iwl_mvm_pause_tcm(mvm, true);
/* make sure we have no running tx while configuring the seqno */
synchronize_net();
@ -1615,6 +1625,7 @@ out:
/* the FW might have updated the regdomain */
iwl_mvm_update_changed_regdom(mvm);
iwl_mvm_resume_tcm(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -26,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -65,14 +67,14 @@ static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
{
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ;
return IWL_TLC_MNG_CH_WIDTH_160MHZ;
case IEEE80211_STA_RX_BW_80:
return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ;
return IWL_TLC_MNG_CH_WIDTH_80MHZ;
case IEEE80211_STA_RX_BW_40:
return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
return IWL_TLC_MNG_CH_WIDTH_40MHZ;
case IEEE80211_STA_RX_BW_20:
default:
return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ;
return IWL_TLC_MNG_CH_WIDTH_20MHZ;
}
}
@ -85,7 +87,9 @@ static u8 rs_fw_set_active_chains(u8 chains)
if (chains & ANT_B)
fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
if (chains & ANT_C)
fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK;
WARN(false,
"tlc offload doesn't support antenna C. chains: 0x%x\n",
chains);
return fw_chains;
}
@ -97,13 +101,13 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
u8 supp = 0;
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
supp |= IWL_TLC_MNG_SGI_20MHZ_MSK;
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
supp |= IWL_TLC_MNG_SGI_40MHZ_MSK;
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
supp |= IWL_TLC_MNG_SGI_80MHZ_MSK;
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
supp |= IWL_TLC_MNG_SGI_160MHZ_MSK;
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
return supp;
}
@ -114,9 +118,7 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
bool vht_ena = vht_cap && vht_cap->vht_supported;
u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK |
IWL_TLC_MNG_CFG_FLAGS_DCM_MSK |
IWL_TLC_MNG_CFG_FLAGS_DD_MSK;
u16 flags = 0;
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
@ -129,16 +131,11 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK;
return flags;
}
static
int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
int nss)
{
u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
@ -160,15 +157,16 @@ int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
return 0;
}
static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
struct ieee80211_sta_vht_cap *vht_cap,
struct iwl_tlc_config_cmd *cmd)
static void
rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
const struct ieee80211_sta_vht_cap *vht_cap,
struct iwl_tlc_config_cmd *cmd)
{
u16 supp;
int i, highest_mcs;
for (i = 0; i < sta->rx_nss; i++) {
if (i == MAX_RS_ANT_NUM)
if (i == MAX_NSS)
break;
highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
@ -179,7 +177,9 @@ static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
cmd->ht_supp_rates[i] = cpu_to_le16(supp);
cmd->ht_rates[i][0] = cpu_to_le16(supp);
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
cmd->ht_rates[i][1] = cmd->ht_rates[i][0];
}
}
@ -190,8 +190,8 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
int i;
unsigned long tmp;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
/* non HT rates */
supp = 0;
@ -199,45 +199,40 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
for_each_set_bit(i, &tmp, BITS_PER_LONG)
supp |= BIT(sband->bitrates[i].hw_value);
cmd->non_ht_supp_rates = cpu_to_le16(supp);
cmd->non_ht_rates = cpu_to_le16(supp);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
/* HT/VHT rates */
if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap && ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
}
}
static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id)
{
u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0);
struct iwl_tlc_notif_req_config_cmd cfg_cmd = {
.sta_id = sta_id,
.flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
.interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
};
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret);
}
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tlc_update_notif *notif;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct iwl_lq_sta_rs_fw *lq_sta;
u32 flags;
rcu_read_lock();
notif = (void *)pkt->data;
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
if (IS_ERR_OR_NULL(sta)) {
IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
notif->sta_id);
goto out;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (!mvmsta) {
IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
@ -245,14 +240,30 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
goto out;
}
flags = le32_to_cpu(notif->flags);
lq_sta = &mvmsta->lq_sta.rs_fw;
if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) {
lq_sta->last_rate_n_flags =
le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
lq_sta->last_rate_n_flags);
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
u16 size = le32_to_cpu(notif->amsdu_size);
if (WARN_ON(sta->max_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
le32_to_cpu(notif->amsdu_size), size,
mvmsta->amsdu_enabled);
}
out:
rcu_read_unlock();
}
@ -267,12 +278,12 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband;
struct iwl_tlc_config_cmd cfg_cmd = {
.sta_id = mvmsta->sta_id,
.max_supp_ch_width = rs_fw_bw_from_sta_bw(sta),
.max_ch_width = rs_fw_bw_from_sta_bw(sta),
.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
.max_supp_ss = sta->rx_nss,
.max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize),
.max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
.amsdu = iwl_mvm_is_csum_supported(mvm),
};
int ret;
@ -287,8 +298,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id);
}
int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,

@ -1715,12 +1715,18 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
/*
* When TLC offload is not active, amsdu_enabled is either 0xFFFF
* or 0, since there is no per-TID algorithm.
*/
if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
scale_action == RS_ACTION_DOWNSCALE)
mvmsta->tlc_amsdu = false;
mvmsta->amsdu_enabled = 0;
else
mvmsta->tlc_amsdu = true;
mvmsta->amsdu_enabled = 0xFFFF;
mvmsta->max_amsdu_len = sta->max_amsdu_len;
}
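/*
 * A note on the bitmap semantics (editorial, grounded in this patch):
 * with driver rate scaling there is no per-TID decision, so
 * amsdu_enabled is all-or-nothing; the TX path then tests individual
 * bits (amsdu_enabled & BIT(tid)), which the TLC-offload notification
 * can set per TID.
 */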
/*
@ -3134,7 +3140,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
sband = hw->wiphy->bands[band];
lq_sta->lq.sta_id = mvmsta->sta_id;
mvmsta->tlc_amsdu = false;
mvmsta->amsdu_enabled = 0;
mvmsta->max_amsdu_len = sta->max_amsdu_len;
for (j = 0; j < LQ_SIZE; j++)
rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
@ -3744,7 +3751,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(rate->sgi) ? "SGI" : "NGI",
(rate->ldpc) ? "LDPC" : "BCC",
(lq_sta->is_agg) ? "AGG on" : "",
(mvmsta->tlc_amsdu) ? "AMSDU on" : "");
(mvmsta->amsdu_enabled) ? "AMSDU on" : "");
}
desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);

@ -454,5 +454,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band);
int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt);
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
#endif /* __rs__ */

@ -254,6 +254,74 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return 0;
}
static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr, u32 len,
struct iwl_rx_phy_info *phy_info,
u32 rate_n_flags)
{
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_tcm_mac *mdata;
int mac;
int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
struct iwl_mvm_vif *mvmvif;
/* expected throughput in 100Kbps, single stream, 20 MHz */
static const u8 thresh_tpt[] = {
9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
};
u16 thr;
if (ieee80211_is_data_qos(hdr->frame_control))
ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
schedule_delayed_work(&mvm->tcm.work, 0);
mdata = &mvm->tcm.data[mac];
mdata->rx.pkts[ac]++;
/* count the airtime only once for each ampdu */
if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
}
if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)))
return;
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
if (mdata->opened_rx_ba_sessions ||
mdata->uapsd_nonagg_detect.detected ||
(!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
!mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
!mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
!mvmvif->queue_params[IEEE80211_AC_BK].uapsd) ||
mvmsta->sta_id != mvmvif->ap_sta_id)
return;
if (rate_n_flags & RATE_MCS_HT_MSK) {
thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK];
thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
RATE_HT_MCS_NSS_POS);
} else {
if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >=
ARRAY_SIZE(thresh_tpt)))
return;
thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS);
}
thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >>
RATE_MCS_CHAN_WIDTH_POS);
mdata->uapsd_nonagg_detect.rx_bytes += len;
ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
}
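/*
 * A minimal sketch (hypothetical helper, not in the driver) of the
 * threshold scaling above: the table entry is in 100 Kbps units for a
 * single stream at 20 MHz, multiplied by the stream count and doubled
 * for every channel-width step.
 */
static u16 example_expected_tpt(u8 rate_code, u8 nss, u8 bw_shift)
{
static const u8 thresh_tpt[] = {
9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
};
/* e.g. HT MCS 7, 2 streams, 40 MHz: 96 * 2 << 1 = 384 -> 38.4 Mbps */
return (u16)(thresh_tpt[rate_code] * nss) << bw_shift;
}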
static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
struct sk_buff *skb,
u32 status)
@ -408,6 +476,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
NULL);
}
if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
!is_multicast_ether_addr(hdr->addr1) &&
ieee80211_is_data(hdr->frame_control))
iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
rate_n_flags);
if (ieee80211_is_data(hdr->frame_control))
iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
}
@ -654,7 +728,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
int expected_size;
int i;
u8 *energy;
__le32 *bytes, *air_time;
__le32 *bytes;
__le32 *air_time;
__le32 flags;
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@ -752,6 +827,32 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
sta->avg_energy = energy[i];
}
rcu_read_unlock();
/*
* Don't update if the statistics were not cleared, since we
* would end up counting the same airtime twice: once in the TCM
* request and once in the statistics notification.
*/
if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
return;
spin_lock(&mvm->tcm.lock);
for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
u32 airtime = le32_to_cpu(air_time[i]);
u32 rx_bytes = le32_to_cpu(bytes[i]);
mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
if (airtime) {
/* re-init every time to store rate from FW */
ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
rx_bytes * 8 / airtime);
}
mdata->rx.airtime += airtime;
}
spin_unlock(&mvm->tcm.lock);
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)

@ -112,7 +112,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
return -1;
if (ieee80211_is_data_qos(hdr->frame_control))
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
tid = ieee80211_get_tid(hdr);
else
tid = 0;
@ -347,8 +347,7 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
if (ieee80211_is_data_qos(hdr->frame_control))
/* frame has qos control */
tid = *ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_TID_MASK;
tid = ieee80211_get_tid(hdr);
else
tid = IWL_MAX_TID_COUNT;
@ -628,7 +627,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
bool last_subframe =
desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
u8 tid = ieee80211_get_tid(hdr);
u8 sub_frame_idx = desc->amsdu_info &
IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
struct iwl_mvm_reorder_buf_entry *entries;
@ -941,6 +940,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
!is_multicast_ether_addr(hdr->addr1) &&
ieee80211_is_data(hdr->frame_control) &&
time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
schedule_delayed_work(&mvm->tcm.work, 0);
/*
* We have tx blocked stations (with CS bit). If we heard
* frames from a blocked station on a new channel we can

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -19,9 +20,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
* along with this program
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@ -76,12 +75,6 @@
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
enum iwl_mvm_traffic_load {
IWL_MVM_TRAFFIC_LOW,
IWL_MVM_TRAFFIC_MEDIUM,
IWL_MVM_TRAFFIC_HIGH,
};
#define IWL_SCAN_DWELL_ACTIVE 10
#define IWL_SCAN_DWELL_PASSIVE 110
#define IWL_SCAN_DWELL_FRAGMENTED 44
@ -123,7 +116,9 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
};
struct iwl_mvm_scan_params {
/* For CDB this is the low-band scan type; for non-CDB it is the only scan type. */
enum iwl_mvm_scan_type type;
enum iwl_mvm_scan_type hb_type;
u32 n_channels;
u16 delay;
int n_ssids;
@ -152,7 +147,7 @@ static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
if (iwl_mvm_is_adaptive_dwell_supported(mvm))
return (void *)&cmd->v7.data;
if (iwl_mvm_has_new_tx_api(mvm))
if (iwl_mvm_cdb_scan_api(mvm))
return (void *)&cmd->v6.data;
return (void *)&cmd->v1.data;
@ -169,7 +164,7 @@ iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
if (iwl_mvm_is_adaptive_dwell_supported(mvm))
return &cmd->v7.channel;
if (iwl_mvm_has_new_tx_api(mvm))
if (iwl_mvm_cdb_scan_api(mvm))
return &cmd->v6.channel;
return &cmd->v1.channel;
@ -234,15 +229,21 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
return IWL_MVM_TRAFFIC_LOW;
return mvm->tcm.result.global_load;
}
static enum iwl_mvm_traffic_load
iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
return mvm->tcm.result.band_load[band];
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
int global_cnt = 0;
enum iwl_mvm_traffic_load load;
bool low_latency;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
@ -251,9 +252,6 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
if (!global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
load = iwl_mvm_get_traffic_load(mvm);
low_latency = iwl_mvm_low_latency(mvm);
if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
return IWL_SCAN_TYPE_FRAGMENTED;
@ -264,25 +262,57 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
return IWL_SCAN_TYPE_WILD;
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
{
enum iwl_mvm_traffic_load load;
bool low_latency;
load = iwl_mvm_get_traffic_load(mvm);
low_latency = iwl_mvm_low_latency(mvm);
return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
bool p2p_device,
enum nl80211_band band)
{
enum iwl_mvm_traffic_load load;
bool low_latency;
load = iwl_mvm_get_traffic_load_band(mvm, band);
low_latency = iwl_mvm_low_latency_band(mvm, band);
return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
}
static int
iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
struct cfg80211_scan_request *req,
struct iwl_mvm_scan_params *params)
{
u32 duration = scan_timing[params->type].max_out_time;
if (!req->duration)
return 0;
if (req->duration_mandatory &&
req->duration > scan_timing[params->type].max_out_time) {
if (iwl_mvm_is_cdb_supported(mvm)) {
u32 hb_time = scan_timing[params->hb_type].max_out_time;
duration = min_t(u32, duration, hb_time);
}
if (req->duration_mandatory && req->duration > duration) {
IWL_DEBUG_SCAN(mvm,
"Measurement scan - too long dwell %hu (max out time %u)\n",
req->duration,
scan_timing[params->type].max_out_time);
duration);
return -EOPNOTSUPP;
}
return min_t(u32, (u32)req->duration,
scan_timing[params->type].max_out_time);
return min_t(u32, (u32)req->duration, duration);
}
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@ -437,6 +467,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
ieee80211_scan_completed(mvm->hw, &info);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
iwl_mvm_resume_tcm(mvm);
} else {
IWL_ERR(mvm,
"got scan complete notification but no scan is running\n");
@ -1030,22 +1061,38 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time[0] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
cfg->suspend_time[1] =
cpu_to_le32(scan_timing[type].suspend_time);
cfg->out_of_channel_time[1] =
enum iwl_mvm_scan_type lb_type, hb_type;
lb_type = iwl_mvm_get_scan_type_band(mvm, false,
NL80211_BAND_2GHZ);
hb_type = iwl_mvm_get_scan_type_band(mvm, false,
NL80211_BAND_5GHZ);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[lb_type].max_out_time);
cfg->suspend_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[lb_type].suspend_time);
cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(scan_timing[hb_type].max_out_time);
cfg->suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(scan_timing[hb_type].suspend_time);
} else {
enum iwl_mvm_scan_type type =
iwl_mvm_get_scan_type(mvm, false);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].suspend_time);
}
iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
@ -1065,7 +1112,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
};
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
enum iwl_mvm_scan_type type;
enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
int num_channels =
mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
@ -1075,10 +1123,20 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
if (type == mvm->scan_type)
return 0;
if (iwl_mvm_is_cdb_supported(mvm)) {
type = iwl_mvm_get_scan_type_band(mvm, false,
NL80211_BAND_2GHZ);
hb_type = iwl_mvm_get_scan_type_band(mvm, false,
NL80211_BAND_5GHZ);
if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
return 0;
} else {
type = iwl_mvm_get_scan_type(mvm, false);
if (type == mvm->scan_type)
return 0;
}
if (iwl_mvm_has_new_tx_api(mvm))
if (iwl_mvm_cdb_scan_api(mvm))
cmd_size = sizeof(struct iwl_scan_config);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
@ -1107,10 +1165,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
if (iwl_mvm_has_new_tx_api(mvm)) {
flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
/*
* Check for fragmented scan on LMAC2 - high band.
* LMAC1 - low band is checked above.
*/
if (iwl_mvm_cdb_scan_api(mvm)) {
if (iwl_mvm_is_cdb_supported(mvm))
flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
@ -1123,8 +1186,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (!ret)
if (!ret) {
mvm->scan_type = type;
mvm->hb_scan_type = hb_type;
}
kfree(cfg);
return ret;
@ -1178,7 +1243,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
hb_timing = &scan_timing[params->type];
hb_timing = &scan_timing[params->hb_type];
cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(hb_timing->max_out_time);
@ -1208,7 +1273,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm)) {
hb_timing = &scan_timing[params->type];
hb_timing = &scan_timing[params->hb_type];
cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(hb_timing->max_out_time);
@ -1216,7 +1281,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cpu_to_le32(hb_timing->suspend_time);
}
if (iwl_mvm_has_new_tx_api(mvm)) {
if (iwl_mvm_cdb_scan_api(mvm)) {
cmd->v6.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
@ -1232,6 +1297,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cpu_to_le32(timing->suspend_time);
}
}
if (iwl_mvm_is_regular_scan(params))
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
else
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@ -1262,11 +1332,12 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
if (params->type == IWL_SCAN_TYPE_FRAGMENTED) {
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
}
if (iwl_mvm_is_cdb_supported(mvm) &&
params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
fw_has_capa(&mvm->fw->ucode_capa,
@ -1497,6 +1568,21 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
iwl_force_nmi(mvm->trans);
}
static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
bool p2p)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->type =
iwl_mvm_get_scan_type_band(mvm, p2p,
NL80211_BAND_2GHZ);
params->hb_type =
iwl_mvm_get_scan_type_band(mvm, p2p,
NL80211_BAND_5GHZ);
} else {
params->type = iwl_mvm_get_scan_type(mvm, p2p);
}
}
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@ -1544,9 +1630,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_plans = &scan_plan;
params.n_scan_plans = 1;
params.type =
iwl_mvm_get_scan_type(mvm,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
iwl_mvm_fill_scan_type(mvm, &params,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
if (ret < 0)
@ -1568,6 +1653,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
return ret;
iwl_mvm_pause_tcm(mvm, false);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret) {
/* If the scan failed, it usually means that the FW was unable
@ -1575,6 +1662,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
iwl_mvm_resume_tcm(mvm);
return ret;
}
@ -1638,9 +1726,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_scan_plans = req->n_scan_plans;
params.scan_plans = req->scan_plans;
params.type =
iwl_mvm_get_scan_type(mvm,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
iwl_mvm_fill_scan_type(mvm, &params,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
@ -1711,6 +1798,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->scan_vif = NULL;
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
iwl_mvm_resume_tcm(mvm);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@ -1827,7 +1915,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
else if (iwl_mvm_has_new_tx_api(mvm))
else if (iwl_mvm_cdb_scan_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))

@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -2466,6 +2468,15 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
iwl_mvm_has_new_tx_api(mvm)) {
u8 ac = tid_to_mac80211_ac[tid];
ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
if (ret)
return ret;
}
spin_lock_bh(&mvmsta->lock);
/* possible race condition - we entered D0i3 while starting agg */
@ -2887,7 +2898,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u32 sta_id,
struct ieee80211_key_conf *key, bool mcast,
u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
u8 key_offset)
u8 key_offset, bool mfp)
{
union {
struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
@ -2960,6 +2971,8 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
if (mfp)
key_flags |= cpu_to_le16(STA_KEY_MFP);
u.cmd.common.key_offset = key_offset;
u.cmd.common.key_flags = key_flags;
@ -3101,11 +3114,13 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_key_seq seq;
u16 p1k[5];
u32 sta_id;
bool mfp = false;
if (sta) {
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
sta_id = mvm_sta->sta_id;
mfp = sta->mfp;
} else if (vif->type == NL80211_IFTYPE_AP &&
!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@ -3127,7 +3142,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
seq.tkip.iv32, p1k, 0, key_offset);
seq.tkip.iv32, p1k, 0, key_offset,
mfp);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
@ -3135,11 +3151,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
0, NULL, 0, key_offset);
0, NULL, 0, key_offset, mfp);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
0, NULL, 0, key_offset);
0, NULL, 0, key_offset, mfp);
}
return ret;
@ -3366,6 +3382,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvm_sta;
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
bool mfp = sta ? sta->mfp : false;
rcu_read_lock();
@ -3373,7 +3390,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(!mvm_sta))
goto unlock;
iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
mfp);
unlock:
rcu_read_unlock();

@ -391,7 +391,9 @@ struct iwl_mvm_rxq_dup_data {
* @tx_protection: reference counter for controlling the Tx protection.
* @tt_tx_protection: is thermal throttling enable Tx protection?
* @disable_tx: is tx to this STA disabled?
* @tlc_amsdu: true if A-MSDU is allowed
* @amsdu_enabled: bitmap of TIDs for which TX A-MSDU is allowed.
* When TLC offload is not active it is either 0xFFFF or 0.
* @max_amsdu_len: max AMSDU length
* @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
* @sleep_tx_count: the number of frames that we told the firmware to let out
* even when that station is asleep. This is useful in case the queue
@ -436,7 +438,8 @@ struct iwl_mvm_sta {
bool tt_tx_protection;
bool disable_tx;
bool tlc_amsdu;
u16 amsdu_enabled;
u16 max_amsdu_len;
bool sleeping;
bool associated;
u8 agg_tids;

@ -767,16 +767,16 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 snap_ip_tcp, pad;
unsigned int dbg_max_amsdu_len;
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 *qc, tid, txf;
u8 tid, txf;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
if (!sta->max_amsdu_len ||
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
(!mvmsta->tlc_amsdu && !dbg_max_amsdu_len))
(!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
/*
@ -790,8 +790,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
}
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
tid = ieee80211_get_tid(hdr);
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
@ -803,7 +802,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
!mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
max_amsdu_len = sta->max_amsdu_len;
if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
tid_to_mac80211_ac[tid] < IEEE80211_AC_BE ||
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
max_amsdu_len = mvmsta->max_amsdu_len;
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
@ -841,7 +845,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
*/
num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
if (num_subframes > 1)
*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
@ -930,6 +934,32 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
return false;
}
static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta,
int airtime)
{
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
if (mvm->tcm.paused)
return;
if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
schedule_delayed_work(&mvm->tcm.work, 0);
mdata->tx.airtime += airtime;
}
static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta, int tid)
{
u32 ac = tid_to_mac80211_ac[tid];
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
mdata->tx.pkts[ac]++;
}
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@ -976,9 +1006,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
* assignment of MGMT TID
*/
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
u8 *qc = NULL;
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
tid = ieee80211_get_tid(hdr);
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
goto drop_unlock_sta;
@ -1067,6 +1095,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
return 0;
drop_unlock_sta:
@ -1469,6 +1499,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_tx_airtime(mvm, mvmsta,
le16_to_cpu(tx_resp->wireless_media_time));
if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
@ -1610,6 +1643,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
le16_to_cpu(tx_resp->wireless_media_time);
mvmsta->tid_data[tid].lq_color =
TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
iwl_mvm_tx_airtime(mvm, mvmsta,
le16_to_cpu(tx_resp->wireless_media_time));
}
rcu_read_unlock();
@ -1800,6 +1835,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate));
}
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
out_unlock:
rcu_read_unlock();
out:

@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
u8 ind = last_idx;
int i;
for (i = 0; i < MAX_RS_ANT_NUM; i++) {
ind = (ind + 1) % MAX_RS_ANT_NUM;
for (i = 0; i < MAX_ANT_NUM; i++) {
ind = (ind + 1) % MAX_ANT_NUM;
if (valid & BIT(ind))
return ind;
}
@ -728,12 +728,14 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
.sta_id = sta_id,
.tid = tid,
};
int queue;
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
if (cmd.tid == IWL_MAX_TID_COUNT)
if (cmd.tid == IWL_MAX_TID_COUNT) {
cmd.tid = IWL_MGMT_TID;
size = IWL_MGMT_QUEUE_SIZE;
}
queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
SCD_QUEUE_CFG, timeout);
SCD_QUEUE_CFG, size, timeout);
if (queue < 0) {
IWL_DEBUG_TX_QUEUES(mvm,
@ -1074,23 +1076,48 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_power_update_mac(mvm);
}
struct iwl_mvm_low_latency_iter {
bool result;
bool result_per_band[NUM_NL80211_BANDS];
};
static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
bool *result = _data;
struct iwl_mvm_low_latency_iter *result = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
enum nl80211_band band;
if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
*result = true;
if (iwl_mvm_vif_low_latency(mvmvif)) {
result->result = true;
if (!mvmvif->phy_ctxt)
return;
band = mvmvif->phy_ctxt->channel->band;
result->result_per_band[band] = true;
}
}
bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
bool result = false;
struct iwl_mvm_low_latency_iter data = {};
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_ll_iter, &result);
iwl_mvm_ll_iter, &data);
return result;
return data.result;
}
bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
struct iwl_mvm_low_latency_iter data = {};
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_ll_iter, &data);
return data.result_per_band[band];
}
struct iwl_bss_iter_data {
@ -1429,6 +1456,387 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
sta->addr, tid);
}
u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
{
if (!elapsed)
return 0;
return (100 * airtime / elapsed) / USEC_PER_MSEC;
}
static enum iwl_mvm_traffic_load
iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
{
u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
return IWL_MVM_TRAFFIC_HIGH;
if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
return IWL_MVM_TRAFFIC_MEDIUM;
return IWL_MVM_TRAFFIC_LOW;
}
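/*
 * Worked check of the percentage math above: airtime is accumulated in
 * usec (hence the USEC_PER_MSEC division) and elapsed is in msec, so
 * e.g. 250000 usec of airtime over a 1000 msec period gives
 * (100 * 250000 / 1000) / 1000 = 25, i.e. 25% load, which is then
 * compared against the IWL_MVM_TCM_LOAD_{MEDIUM,HIGH}_THRESH constants
 * defined elsewhere in the driver.
 */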
struct iwl_mvm_tcm_iter_data {
struct iwl_mvm *mvm;
bool any_sent;
};
static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
struct iwl_mvm_tcm_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
return;
low_latency = mvm->tcm.result.low_latency[mvmvif->id];
if (!mvm->tcm.result.change[mvmvif->id] &&
prev == low_latency) {
iwl_mvm_update_quotas(mvm, false, NULL);
return;
}
if (prev != low_latency) {
/* this sends traffic load and updates quota as well */
iwl_mvm_update_low_latency(mvm, vif, low_latency,
LOW_LATENCY_TRAFFIC);
} else {
iwl_mvm_update_quotas(mvm, false, NULL);
}
data->any_sent = true;
}
static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
struct iwl_mvm_tcm_iter_data data = {
.mvm = mvm,
.any_sent = false,
};
mutex_lock(&mvm->mutex);
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_tcm_iter, &data);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm;
struct iwl_mvm_vif *mvmvif;
struct ieee80211_vif *vif;
mvmvif = container_of(wk, struct iwl_mvm_vif,
uapsd_nonagg_detected_wk.work);
vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
mvm = mvmvif->mvm;
if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
return;
/* remember that this AP is broken */
memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
vif->bss_conf.bssid, ETH_ALEN);
mvm->uapsd_noagg_bssid_write_idx++;
if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
mvm->uapsd_noagg_bssid_write_idx = 0;
iwl_mvm_connection_loss(mvm, vif,
"AP isn't using AMPDU with uAPSD enabled");
}
static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
int *mac_id = data;
if (vif->type != NL80211_IFTYPE_STATION)
return;
if (mvmvif->id != *mac_id)
return;
if (!vif->bss_conf.assoc)
return;
if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
!mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
!mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
!mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
return;
if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected)
return;
mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true;
IWL_INFO(mvm,
"detected AP should do aggregation but isn't, likely due to U-APSD\n");
schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
}
static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
unsigned int elapsed,
int mac)
{
u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
u64 tpt;
unsigned long rate;
rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
return;
if (iwl_mvm_has_new_rx_api(mvm)) {
tpt = 8 * bytes; /* kbps */
do_div(tpt, elapsed);
rate *= 1000; /* kbps */
if (tpt < 22 * rate / 100)
return;
} else {
/*
* the rate here is actually the threshold, in 100Kbps units,
* so do the needed conversion from bytes to 100Kbps:
* 100kb = bits / (100 * 1000),
* 100kbps = 100kb / (msecs / 1000) ==
* (bits / (100 * 1000)) / (msecs / 1000) ==
* bits / (100 * msecs)
*/
tpt = (8 * bytes);
do_div(tpt, elapsed * 100);
if (tpt < rate)
return;
}
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_uapsd_agg_disconnect_iter, &mac);
}
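/*
 * Worked check of the unit conversion above: bytes = 1250000 over
 * elapsed = 1000 msec gives 8 * 1250000 / (1000 * 100) = 100 units of
 * 100 Kbps, i.e. 10 Mbps; the disconnect iterator only runs once the
 * measured throughput reaches the rate threshold, meaning there was
 * enough traffic that the AP should have been aggregating.
 */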
static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u32 *band = _data;
if (!mvmvif->phy_ctxt)
return;
band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
}
static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
unsigned long ts,
bool handle_uapsd)
{
unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
unsigned int uapsd_elapsed =
jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
u32 total_airtime = 0;
u32 band_airtime[NUM_NL80211_BANDS] = {0};
u32 band[NUM_MAC_INDEX_DRIVER] = {0};
int ac, mac, i;
bool low_latency = false;
enum iwl_mvm_traffic_load load, band_load;
bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
if (handle_ll)
mvm->tcm.ll_ts = ts;
if (handle_uapsd)
mvm->tcm.uapsd_nonagg_ts = ts;
mvm->tcm.result.elapsed = elapsed;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_tcm_iterator,
&band);
for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
u32 vo_vi_pkts = 0;
u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
total_airtime += airtime;
band_airtime[band[mac]] += airtime;
load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
mvm->tcm.result.load[mac] = load;
mvm->tcm.result.airtime[mac] = airtime;
for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
vo_vi_pkts += mdata->rx.pkts[ac] +
mdata->tx.pkts[ac];
/* enable immediately with enough packets but defer disabling */
if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
mvm->tcm.result.low_latency[mac] = true;
else if (handle_ll)
mvm->tcm.result.low_latency[mac] = false;
if (handle_ll) {
/* clear old data */
memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
}
low_latency |= mvm->tcm.result.low_latency[mac];
if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
mac);
/* clear old data */
if (handle_uapsd)
mdata->uapsd_nonagg_detect.rx_bytes = 0;
memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
}
load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
mvm->tcm.result.global_load = load;
for (i = 0; i < NUM_NL80211_BANDS; i++) {
band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
mvm->tcm.result.band_load[i] = band_load;
}
/*
* If the current load isn't low we need to force re-evaluation
* in the TCM period, so that we can return to low load if there
* was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
* triggered by traffic).
*/
if (load != IWL_MVM_TRAFFIC_LOW)
return MVM_TCM_PERIOD;
/*
* If low-latency is active we need to force re-evaluation after
* (the longer) MVM_LL_PERIOD, so that we can disable low-latency
* when there's no traffic at all.
*/
if (low_latency)
return MVM_LL_PERIOD;
/*
* Otherwise, we don't need to run the work struct because we're
* in the default "idle" state - traffic indication is low (which
* also covers the "no traffic" case) and low-latency is disabled
* so there's no state that may need to be disabled when there's
* no traffic at all.
*
* Note that this has no impact on the regular scheduling of the
* updates triggered by traffic - those happen whenever one of the
* two timeouts expire (if there's traffic at all.)
*/
return 0;
}
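/*
 * Summary of the re-scheduling logic above: medium/high load re-runs
 * the work after MVM_TCM_PERIOD, active low-latency re-runs it after
 * the longer MVM_LL_PERIOD, and the idle case returns 0 so that the
 * next evaluation is triggered purely by traffic.
 */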

void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
{
    unsigned long ts = jiffies;
    bool handle_uapsd =
        time_after(ts, mvm->tcm.uapsd_nonagg_ts +
                       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));

    spin_lock(&mvm->tcm.lock);
    if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
        spin_unlock(&mvm->tcm.lock);
        return;
    }
    spin_unlock(&mvm->tcm.lock);

    if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
        mutex_lock(&mvm->mutex);
        if (iwl_mvm_request_statistics(mvm, true))
            handle_uapsd = false;
        mutex_unlock(&mvm->mutex);
    }

    spin_lock(&mvm->tcm.lock);
    /* re-check if somebody else won the recheck race */
    if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
        /* calculate statistics */
        unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
                                                          handle_uapsd);

        /* the memset needs to be visible before the timestamp */
        smp_mb();
        mvm->tcm.ts = ts;
        if (work_delay)
            schedule_delayed_work(&mvm->tcm.work, work_delay);
    }
    spin_unlock(&mvm->tcm.lock);

    iwl_mvm_tcm_results(mvm);
}
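
/*
 * Illustrative sketch (editor's note, not part of the patch): the
 * per-MAC counters consumed by iwl_mvm_calc_tcm_stats() are filled on
 * the data path, which also kicks the re-evaluation above. The hook
 * name below is a placeholder, not a function from the driver, and a
 * real data path would rate-limit the recalc call:
 */
static void tcm_account_frame_sketch(struct iwl_mvm *mvm, int mac, int ac,
                                     u32 airtime, bool is_rx)
{
    struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

    if (is_rx) {
        mdata->rx.pkts[ac]++;
        mdata->rx.airtime += airtime;
    } else {
        mdata->tx.pkts[ac]++;
        mdata->tx.airtime += airtime;
    }

    /* cheap when MVM_TCM_PERIOD hasn't elapsed; see the early return */
    iwl_mvm_recalc_tcm(mvm);
}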

void iwl_mvm_tcm_work(struct work_struct *work)
{
    struct delayed_work *delayed_work = to_delayed_work(work);
    struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
                                       tcm.work);

    iwl_mvm_recalc_tcm(mvm);
}

void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
{
    spin_lock_bh(&mvm->tcm.lock);
    mvm->tcm.paused = true;
    spin_unlock_bh(&mvm->tcm.lock);
    if (with_cancel)
        cancel_delayed_work_sync(&mvm->tcm.work);
}

void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
{
    int mac;

    spin_lock_bh(&mvm->tcm.lock);
    mvm->tcm.ts = jiffies;
    mvm->tcm.ll_ts = jiffies;
    for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
        struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

        memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
        memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
        memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
        memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
    }
    /* The TCM data needs to be reset before "paused" flag changes */
    smp_mb();
    mvm->tcm.paused = false;
    spin_unlock_bh(&mvm->tcm.lock);
}
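
/*
 * Illustrative sketch (editor's note, not part of the patch): pause
 * and resume bracket operations during which airtime accounting would
 * be meaningless, e.g. around a firmware restart. A hypothetical call
 * site:
 */
static void tcm_around_fw_restart_sketch(struct iwl_mvm *mvm)
{
    /* also cancel queued work: the state it would read is going away */
    iwl_mvm_pause_tcm(mvm, true);

    /* ... device reset / reconfiguration ... */

    /* counters are zeroed and timestamps rebased before unpausing */
    iwl_mvm_resume_tcm(mvm);
}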

void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
                      iwl_mvm_tcm_uapsd_nonagg_detected_wk);
}

void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}
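
/*
 * Illustrative sketch (editor's note, not part of the patch): the
 * delayed work wired up in iwl_mvm_tcm_add_vif() fires when TCM
 * concludes that a U-APSD association is not getting aggregation.
 * A simplified shape of such a worker; the helper it calls is a
 * placeholder for the real reaction elsewhere in this series:
 */
static void uapsd_misbehaving_ap_note_sketch(struct iwl_mvm_vif *mvmvif)
{
    /* stand-in: record the BSSID and reconnect without U-APSD */
}

static void uapsd_nonagg_detected_wk_sketch(struct work_struct *wk)
{
    struct iwl_mvm_vif *mvmvif =
        container_of(wk, struct iwl_mvm_vif,
                     uapsd_nonagg_detected_wk.work);

    uapsd_misbehaving_ap_note_sketch(mvmvif);
}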

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
    bool ps_disabled;

View File

@ -244,7 +244,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
    ctxt_info->hcmd_cfg.cmd_queue_addr =
        cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
    ctxt_info->hcmd_cfg.cmd_queue_size =
        TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size);
        TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);

    /* allocate ucode sections in dram and set addresses */
    ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);

View File

@ -383,7 +383,6 @@ struct iwl_self_init_dram {
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @tx_cmd_queue_size: the size of the tx command queue
 */
struct iwl_trans_pcie {
    struct iwl_rxq *rxq;

@ -465,7 +464,6 @@ struct iwl_trans_pcie {
    u32 fh_mask;
    u32 hw_mask;
    cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
    u16 tx_cmd_queue_size;
};

static inline struct iwl_trans_pcie *

@ -537,7 +535,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
                                          u8 idx)

@ -822,7 +819,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 struct iwl_tx_queue_cfg_cmd *cmd,
                                 int cmd_id,
                                 int cmd_id, int size,
                                 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,

View File

@ -488,6 +488,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
    spin_lock(&txq->lock);

    if (iwl_queue_space(txq) < txq->high_mark) {
        iwl_stop_queue(trans, txq);

        /* don't put the packet on the ring, if there is no room */
        if (unlikely(iwl_queue_space(txq) < 3)) {
            struct iwl_device_cmd **dev_cmd_ptr;

            dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                   trans_pcie->dev_cmd_offs);

            *dev_cmd_ptr = dev_cmd;
            __skb_queue_tail(&txq->overflow_q, skb);

            spin_unlock(&txq->lock);
            return 0;
        }
    }

    idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);

    /* Set up driver data for this TFD */
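
/*
 * Illustrative sketch (editor's note, not part of the patch): rather
 * than dropping a frame when the TFD ring is almost full, the hunk
 * above parks it on txq->overflow_q; the reclaim path re-submits it
 * (see the iwl_trans_tx() change at the end of this diff) and
 * iwl_pcie_gen2_txq_unmap() frees whatever is still parked. The "< 3"
 * headroom guards the circular-buffer space computation, which for a
 * power-of-two ring has this general shape (a model, not the driver's
 * iwl_queue_space()):
 */
static int ring_space_sketch(int write_ptr, int read_ptr, int size)
{
    /* free slots, keeping one slot unused to tell full from empty */
    return (read_ptr - write_ptr - 1) & (size - 1);
}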

@ -523,9 +540,6 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
    /* Tell device the write index *just past* this latest filled TFD */
    txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
    iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
    if (iwl_queue_space(txq) < txq->high_mark)
        iwl_stop_queue(trans, txq);

    /*
     * At this point the frame is "transmitted" successfully
     * and we will get a TX status notification eventually.

@ -555,15 +569,13 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
    unsigned long flags;
    void *dup_buf = NULL;
    dma_addr_t phys_addr;
    int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
    int i, cmd_pos, idx;
    u16 copy_size, cmd_size, tb0_size;
    bool had_nocopy = false;
    u8 group_id = iwl_cmd_groupid(cmd->id);
    const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
    u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
    struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);

    memset(tfd, 0, sizeof(*tfd));
    struct iwl_tfh_tfd *tfd;

    copy_size = sizeof(struct iwl_cmd_header_wide);
    cmd_size = sizeof(struct iwl_cmd_header_wide);

@ -634,6 +646,10 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
    spin_lock_bh(&txq->lock);

    idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
    tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
    memset(tfd, 0, sizeof(*tfd));

    if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
        spin_unlock_bh(&txq->lock);

@ -957,6 +973,13 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
            spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
        }
    }

    while (!skb_queue_empty(&txq->overflow_q)) {
        struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

        iwl_op_mode_free_skb(trans->op_mode, skb);
    }

    spin_unlock_bh(&txq->lock);

    /* just in case - this queue may have been stopped */

@ -972,7 +995,7 @@ static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
    /* De-alloc circular buffer of TFDs */
    if (txq->tfds) {
        dma_free_coherent(dev,
                          trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
                          trans_pcie->tfd_size * txq->n_window,
                          txq->tfds, txq->dma_addr);
        dma_free_coherent(dev,
                          sizeof(*txq->first_tb_bufs) * txq->n_window,
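
/*
 * Illustrative sketch (editor's note, not part of the patch): on
 * gen2, the TFD ring is allocated with tfd_size * slots_num (see the
 * iwl_pcie_txq_alloc() hunk later in this diff), so the free above
 * must use txq->n_window rather than the TFD_QUEUE_SIZE_MAX upper
 * bound - dma_free_coherent() must be passed the size that was
 * allocated. The paired calls, with fields simplified:
 */
struct txq_buf_sketch {
    void *tfds;
    dma_addr_t dma_addr;
    int n_window;               /* slots actually allocated */
};

static int txq_buf_lifetime_sketch(struct device *dev,
                                   struct txq_buf_sketch *txq,
                                   size_t tfd_size)
{
    size_t sz = tfd_size * txq->n_window;

    txq->tfds = dma_alloc_coherent(dev, sz, &txq->dma_addr, GFP_KERNEL);
    if (!txq->tfds)
        return -ENOMEM;

    /* ... queue lives here ... */

    dma_free_coherent(dev, sz, txq->tfds, txq->dma_addr); /* same sz */
    return 0;
}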

@ -1020,7 +1043,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 struct iwl_tx_queue_cfg_cmd *cmd,
                                 int cmd_id,
                                 int cmd_id, int size,
                                 unsigned int timeout)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

@ -1046,12 +1069,12 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
        return -ENOMEM;
    }

    ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
    ret = iwl_pcie_txq_alloc(trans, txq, size, false);
    if (ret) {
        IWL_ERR(trans, "Tx queue alloc failed\n");
        goto error;
    }
    ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
    ret = iwl_pcie_txq_init(trans, txq, size, false);
    if (ret) {
        IWL_ERR(trans, "Tx queue init failed\n");
        goto error;

@ -1061,7 +1084,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
    cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
    cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
    cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS));
    cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

    ret = iwl_trans_send_cmd(trans, &hcmd);
    if (ret)

@ -1152,8 +1175,6 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
    struct iwl_txq *cmd_queue;
    int txq_id = trans_pcie->cmd_queue, ret;

    iwl_pcie_set_tx_cmd_queue_size(trans);

    /* alloc and init the command queue */
    if (!trans_pcie->txq[txq_id]) {
        cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);

@ -1162,8 +1183,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
            return -ENOMEM;
        }
        trans_pcie->txq[txq_id] = cmd_queue;
        ret = iwl_pcie_txq_alloc(trans, cmd_queue,
                                 trans_pcie->tx_cmd_queue_size, true);
        ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
        if (ret) {
            IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
            goto error;

@ -1172,8 +1192,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
        cmd_queue = trans_pcie->txq[txq_id];
    }

    ret = iwl_pcie_txq_init(trans, cmd_queue,
                            trans_pcie->tx_cmd_queue_size, true);
    ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
    if (ret) {
        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
        goto error;

View File

@ -495,6 +495,9 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
    if (WARN_ON(txq->entries || txq->tfds))
        return -EINVAL;

    if (trans->cfg->use_tfh)
        tfd_sz = trans_pcie->tfd_size * slots_num;

    timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
    txq->trans_pcie = trans_pcie;

@ -950,8 +953,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
         txq_id++) {
        bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

        slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
                    TFD_TX_CMD_SLOTS;
        slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
        trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
        ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
                                 slots_num, cmd_queue);

@ -970,21 +972,6 @@ error:
    return ret;
}

void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
    int queue_size = TFD_CMD_SLOTS;

    if (trans->cfg->tx_cmd_queue_size)
        queue_size = trans->cfg->tx_cmd_queue_size;

    if (WARN_ON(!(is_power_of_2(queue_size) &&
                  TFD_QUEUE_CB_SIZE(queue_size) > 0)))
        trans_pcie->tx_cmd_queue_size = TFD_CMD_SLOTS;
    else
        trans_pcie->tx_cmd_queue_size = queue_size;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
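
/*
 * Illustrative sketch (editor's note, not part of the patch): the
 * helper removed above validated that a configured command-queue size
 * was a power of two with a representable TFD_QUEUE_CB_SIZE(), since
 * the hardware takes the ring size as a log2-encoded field. Assuming
 * an encoding of the form ilog2(slots) - 3 (an assumption for
 * illustration, not a quote of the header):
 */
static int cb_size_sketch(unsigned int slots)
{
    if (!is_power_of_2(slots))
        return -EINVAL;            /* not encodable by the hardware */
    return ilog2(slots) - 3;       /* e.g. 32 slots -> 2, 256 -> 5 */
}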

@ -992,8 +979,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
    int txq_id, slots_num;
    bool alloc = false;

    iwl_pcie_set_tx_cmd_queue_size(trans);

    if (!trans_pcie->txq_memory) {
        ret = iwl_pcie_tx_alloc(trans);
        if (ret)

@ -1017,8 +1002,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
         txq_id++) {
        bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

        slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
                    TFD_TX_CMD_SLOTS;
        slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
        ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
                                slots_num, cmd_queue);
        if (ret) {

@ -1166,7 +1150,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
             * In that case, iwl_queue_space will be small again
             * and we won't wake mac80211's queue.
             */
            iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
            iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
        }
        spin_lock_bh(&txq->lock);