Merge tag 'iwlwifi-next-for-kalle-2017-02-06' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Second batch of improvements and fixes for v4.11.

* A bunch of bugfixes for the DQA code;
* Work on support for new A000 devices continues;
* Some clean-ups and general improvements.

commit d7eb3c0a6b

@@ -72,9 +72,13 @@
#define IWL_A000_SMEM_OFFSET 0x400000
#define IWL_A000_SMEM_LEN 0x68000

#define IWL_A000_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_MODULE_FIRMWARE(api) \
    IWL_A000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"

#define IWL_A000_HR_MODULE_FIRMWARE(api) \
    IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
    IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"

#define NVM_HW_SECTION_NUM_FAMILY_A000 10

@@ -116,11 +120,12 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
    .mq_rx_supported = true, \
    .vht_mu_mimo_supported = true, \
    .mac_addr_from_csr = true, \
    .use_tfh = true
    .use_tfh = true, \
    .rf_id = true

const struct iwl_cfg iwla000_2ac_cfg = {
const struct iwl_cfg iwla000_2ac_cfg_hr = {
    .name = "Intel(R) Dual Band Wireless AC a000",
    .fw_name_pre = IWL_A000_FW_PRE,
    .fw_name_pre = IWL_A000_HR_FW_PRE,
    IWL_DEVICE_A000,
    .ht_params = &iwl_a000_ht_params,
    .nvm_ver = IWL_A000_NVM_VERSION,

@@ -128,4 +133,15 @@ const struct iwl_cfg iwla000_2ac_cfg = {
    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};

MODULE_FIRMWARE(IWL_A000_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
const struct iwl_cfg iwla000_2ac_cfg_jf = {
    .name = "Intel(R) Dual Band Wireless AC a000",
    .fw_name_pre = IWL_A000_JF_FW_PRE,
    IWL_DEVICE_A000,
    .ht_params = &iwl_a000_ht_params,
    .nvm_ver = IWL_A000_NVM_VERSION,
    .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};

MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));

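For illustration only (this sketch is not part of the patch, and the API value 30 is an arbitrary stand-in): the MODULE_FIRMWARE macros above simply paste the per-RF prefix, the API number and the ".ucode" suffix together. Note that the prefix already ends in '-', so the extra "-" in the macro yields a doubled dash in the requested file name:

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
    IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"

int main(void)
{
    /* Prints "iwlwifi-Qu-a0-hr-a0--30.ucode" (note the double dash). */
    puts(IWL_A000_HR_MODULE_FIRMWARE(30));
    return 0;
}
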
@@ -455,7 +455,8 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
#endif /* CONFIG_IWLMVM */

#endif /* __IWL_CONFIG_H__ */

@@ -349,6 +349,7 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
#define CSR_HW_RF_ID_TYPE_HR (0x00109000)

/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)

@@ -102,7 +102,6 @@ static struct dentry *iwl_dbgfs_root;
 * @op_mode: the running op_mode
 * @trans: transport layer
 * @dev: for debug prints only
 * @cfg: configuration struct
 * @fw_index: firmware revision to try loading
 * @firmware_name: composite filename of ucode file to load
 * @request_firmware_complete: the firmware has been obtained from user space

@@ -114,7 +113,6 @@ struct iwl_drv {
    struct iwl_op_mode *op_mode;
    struct iwl_trans *trans;
    struct device *dev;
    const struct iwl_cfg *cfg;

    int fw_index; /* firmware we're trying to load */
    char firmware_name[64]; /* name of firmware file to load */

@@ -213,18 +211,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,

static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
    const char *name_pre = drv->cfg->fw_name_pre;
    const char *name_pre = drv->trans->cfg->fw_name_pre;
    char tag[8];

    if (first) {
        drv->fw_index = drv->cfg->ucode_api_max;
        drv->fw_index = drv->trans->cfg->ucode_api_max;
        sprintf(tag, "%d", drv->fw_index);
    } else {
        drv->fw_index--;
        sprintf(tag, "%d", drv->fw_index);
    }

    if (drv->fw_index < drv->cfg->ucode_api_min) {
    if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
        IWL_ERR(drv, "no suitable firmware found!\n");
        return -ENOENT;
    }

@@ -1207,7 +1205,7 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
    dbgfs_dir = drv->dbgfs_op_mode;
#endif

    op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
    op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);

#ifdef CONFIG_IWLWIFI_DEBUGFS
    if (!op_mode) {

@@ -1247,8 +1245,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
    struct iwlwifi_opmode_table *op;
    int err;
    struct iwl_firmware_pieces *pieces;
    const unsigned int api_max = drv->cfg->ucode_api_max;
    const unsigned int api_min = drv->cfg->ucode_api_min;
    const unsigned int api_max = drv->trans->cfg->ucode_api_max;
    const unsigned int api_min = drv->trans->cfg->ucode_api_min;
    size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
    u32 api_ver;
    int i;

@@ -1310,7 +1308,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
     * In mvm uCode there is no difference between data and instructions
     * sections.
     */
    if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg))
    if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces,
                                                     drv->trans->cfg))
        goto try_again;

    /* Allocate ucode buffers for card's bus-master loading ... */

@@ -1408,14 +1407,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
    else
        fw->init_evtlog_size =
            drv->cfg->base_params->max_event_log_size;
            drv->trans->cfg->base_params->max_event_log_size;
    fw->init_errlog_ptr = pieces->init_errlog_ptr;
    fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
    if (pieces->inst_evtlog_size)
        fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
    else
        fw->inst_evtlog_size =
            drv->cfg->base_params->max_event_log_size;
            drv->trans->cfg->base_params->max_event_log_size;
    fw->inst_errlog_ptr = pieces->inst_errlog_ptr;

    /*

@@ -1504,8 +1503,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
    kfree(pieces);
}

struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
                              const struct iwl_cfg *cfg)
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
{
    struct iwl_drv *drv;
    int ret;

@@ -1518,7 +1516,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,

    drv->trans = trans;
    drv->dev = trans->dev;
    drv->cfg = cfg;

    init_completion(&drv->request_firmware_complete);
    INIT_LIST_HEAD(&drv->list);

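A standalone sketch of the fallback strategy iwl_request_firmware() implements above (simplified; the real driver hands each candidate name to request_firmware_nowait() and retries from its completion callback):

#include <stdbool.h>
#include <stdio.h>

struct fw_cfg { int ucode_api_min, ucode_api_max; };

/* Returns the next API index to try, or -1 once we fall below the minimum. */
static int next_fw_index(const struct fw_cfg *cfg, int cur, bool first)
{
    int idx = first ? cfg->ucode_api_max : cur - 1;

    return idx < cfg->ucode_api_min ? -1 : idx; /* -1 plays the -ENOENT role */
}

int main(void)
{
    struct fw_cfg cfg = { .ucode_api_min = 22, .ucode_api_max = 26 };

    for (int idx = next_fw_index(&cfg, 0, true); idx >= 0;
         idx = next_fw_index(&cfg, idx, false))
        printf("would request iwlwifi-<prefix>-%d.ucode\n", idx);
    return 0;
}
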
@@ -118,15 +118,13 @@ struct iwl_cfg;
 * iwl_drv_start - start the drv
 *
 * @trans_ops: the ops of the transport
 * @cfg: device specific constants / virtual functions
 *
 * starts the driver: fetches the firmware. This should be called by bus
 * specific system flows implementations. For example, the bus specific probe
 * function should do bus related operations only, and then call to this
 * function. It returns the driver object or %NULL if an error occurred.
 */
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
                              const struct iwl_cfg *cfg);
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans);

/**
 * iwl_drv_stop - stop the drv

@@ -91,7 +91,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
    memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
    memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
    mvmvif->rekey_data.replay_ctr =
        cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
        cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
    mvmvif->rekey_data.valid = true;

    mutex_unlock(&mvm->mutex);

@@ -1741,7 +1741,7 @@ out:
static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    u32 base = mvm->error_event_table;
    u32 base = mvm->error_event_table[0];
    struct error_table_start {
        /* cf. struct iwl_error_event_table */
        u32 valid;

@@ -64,13 +64,14 @@
#define __fw_api_mac_h__

/*
 * The first MAC indices (starting from 0)
 * are available to the driver, AUX follows
 * The first MAC indices (starting from 0) are available to the driver,
 * AUX indices follows - 1 for non-CDB, 2 for CDB.
 */
#define MAC_INDEX_AUX 4
#define MAC_INDEX_MIN_DRIVER 0
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)

#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_TDLS_STA_COUNT 4

@@ -453,6 +453,8 @@ enum scan_config_flags {
    SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
    SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
    SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
    SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22),
    SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23),

/* Bits 26-31 are for num of channels in channel_array */
#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)

@@ -485,6 +487,20 @@ enum iwl_channel_flags {
    IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
};

/**
 * struct iwl_scan_dwell
 * @active: default dwell time for active scan
 * @passive: default dwell time for passive scan
 * @fragmented: default dwell time for fragmented scan
 * @extended: default dwell time for channels 1, 6 and 11
 */
struct iwl_scan_dwell {
    u8 active;
    u8 passive;
    u8 fragmented;
    u8 extended;
} __packed;

/**
 * struct iwl_scan_config
 * @flags: enum scan_config_flags

@@ -493,10 +509,7 @@ enum iwl_channel_flags {
 * @legacy_rates: default legacy rates - enum scan_config_rates
 * @out_of_channel_time: default max out of serving channel time
 * @suspend_time: default max suspend time
 * @dwell_active: default dwell time for active scan
 * @dwell_passive: default dwell time for passive scan
 * @dwell_fragmented: default dwell time for fragmented scan
 * @dwell_extended: default dwell time for channels 1, 6 and 11
 * @dwell: dwells for the scan
 * @mac_addr: default mac address to be used in probes
 * @bcast_sta_id: the index of the station in the fw
 * @channel_flags: default channel flags - enum iwl_channel_flags

@@ -510,16 +523,29 @@ struct iwl_scan_config {
    __le32 legacy_rates;
    __le32 out_of_channel_time;
    __le32 suspend_time;
    u8 dwell_active;
    u8 dwell_passive;
    u8 dwell_fragmented;
    u8 dwell_extended;
    struct iwl_scan_dwell dwell;
    u8 mac_addr[ETH_ALEN];
    u8 bcast_sta_id;
    u8 channel_flags;
    u8 channel_array[];
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */

#define SCAN_TWO_LMACS 2

struct iwl_scan_config_cdb {
    __le32 flags;
    __le32 tx_chains;
    __le32 rx_chains;
    __le32 legacy_rates;
    __le32 out_of_channel_time[SCAN_TWO_LMACS];
    __le32 suspend_time[SCAN_TWO_LMACS];
    struct iwl_scan_dwell dwell;
    u8 mac_addr[ETH_ALEN];
    u8 bcast_sta_id;
    u8 channel_flags;
    u8 channel_array[];
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */

/**
 * iwl_umac_scan_flags
 * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request

@@ -540,17 +566,18 @@ enum iwl_umac_scan_uid_offsets {
};

enum iwl_umac_scan_general_flags {
    IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
    IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
    IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
    IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
    IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
    IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
    IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
    IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
    IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
    IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
    IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
    IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
};

/**

@@ -610,8 +637,9 @@ struct iwl_scan_req_umac_tail {
 * @active_dwell: dwell time for active scan
 * @passive_dwell: dwell time for passive scan
 * @fragmented_dwell: dwell time for fragmented passive scan
 * @max_out_time: max out of serving channel time
 * @suspend_time: max suspend time
 * @max_out_time: max out of serving channel time, per LMAC - for CDB there
 *    are 2 LMACs
 * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
 * @scan_priority: scan internal prioritization &enum iwl_scan_priority
 * @channel_flags: &enum iwl_scan_channel_flags
 * @n_channels: num of channels in scan request

@@ -631,15 +659,33 @@ struct iwl_scan_req_umac {
    u8 active_dwell;
    u8 passive_dwell;
    u8 fragmented_dwell;
    __le32 max_out_time;
    __le32 suspend_time;
    __le32 scan_priority;
    /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
    u8 channel_flags;
    u8 n_channels;
    __le16 reserved;
    u8 data[];
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
    union {
        struct {
            __le32 max_out_time;
            __le32 suspend_time;
            __le32 scan_priority;
            /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
            u8 channel_flags;
            u8 n_channels;
            __le16 reserved;
            u8 data[];
        } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
        struct {
            __le32 max_out_time[SCAN_TWO_LMACS];
            __le32 suspend_time[SCAN_TWO_LMACS];
            __le32 scan_priority;
            /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
            u8 channel_flags;
            u8 n_channels;
            __le16 reserved;
            u8 data[];
        } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
    };
} __packed;

#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
                                2 * sizeof(__le32))

/**
 * struct iwl_umac_scan_abort

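Why IWL_SCAN_REQ_UMAC_SIZE subtracts exactly 2 * sizeof(__le32): the cdb arm of the union turns max_out_time and suspend_time into two-element arrays, i.e. one extra __le32 each. A reduced sketch (simplified stand-in types, not the driver structs) that a compiler will check:

#include <assert.h>
#include <stdint.h>

typedef uint32_t le32; /* stand-in for __le32 */

struct no_cdb_tail { le32 max_out_time;    le32 suspend_time;    };
struct cdb_tail    { le32 max_out_time[2]; le32 suspend_time[2]; };

/* The CDB layout is larger by exactly two __le32 words. */
static_assert(sizeof(struct cdb_tail) - sizeof(struct no_cdb_tail) ==
              2 * sizeof(le32), "CDB adds two __le32 words");

int main(void) { return 0; }
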
@@ -220,7 +220,7 @@ struct mvm_statistics_bt_activity {
    __le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */

struct mvm_statistics_general_v8 {
struct mvm_statistics_general_common {
    __le32 radio_temperature;
    __le32 radio_voltage;
    struct mvm_statistics_dbg dbg;

@@ -248,11 +248,22 @@ struct mvm_statistics_general_v8 {
    __le64 on_time_rf;
    __le64 on_time_scan;
    __le64 tx_time;
} __packed;

struct mvm_statistics_general_v8 {
    struct mvm_statistics_general_common common;
    __le32 beacon_counter[NUM_MAC_INDEX];
    u8 beacon_average_energy[NUM_MAC_INDEX];
    u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */

struct mvm_statistics_general_cdb {
    struct mvm_statistics_general_common common;
    __le32 beacon_counter[NUM_MAC_INDEX_CDB];
    u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
    u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */

/**
 * struct mvm_statistics_load - RX statistics for multi-queue devices
 * @air_time: accumulated air time, per mac

@@ -267,6 +278,13 @@ struct mvm_statistics_load {
    u8 avg_energy[IWL_MVM_STATION_COUNT];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */

struct mvm_statistics_load_cdb {
    __le32 air_time[NUM_MAC_INDEX_CDB];
    __le32 byte_count[NUM_MAC_INDEX_CDB];
    __le32 pkt_count[NUM_MAC_INDEX_CDB];
    u8 avg_energy[IWL_MVM_STATION_COUNT];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_2 */

struct mvm_statistics_rx {
    struct mvm_statistics_rx_phy ofdm;
    struct mvm_statistics_rx_phy cck;

@@ -281,6 +299,7 @@ struct mvm_statistics_rx {
 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
 * STATISTICS_CMD (0x9c), below.
 */

struct iwl_notif_statistics_v10 {
    __le32 flag;
    struct mvm_statistics_rx rx;

@@ -296,6 +315,14 @@ struct iwl_notif_statistics_v11 {
    struct mvm_statistics_load load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_11 */

struct iwl_notif_statistics_cdb {
    __le32 flag;
    struct mvm_statistics_rx rx;
    struct mvm_statistics_tx tx;
    struct mvm_statistics_general_cdb general;
    struct mvm_statistics_load_cdb load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_12 */

#define IWL_STATISTICS_FLG_CLEAR 0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2

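The reserved[] arrays above pad the u8 beacon_average_energy[] member out to a 32-bit boundary. With the values from the fw-api-mac.h hunk (NUM_MAC_INDEX = 5, NUM_MAC_INDEX_CDB = 6), a quick check of the arithmetic:

#include <stdio.h>

#define NUM_MAC_INDEX     5 /* NUM_MAC_INDEX_DRIVER + 1 */
#define NUM_MAC_INDEX_CDB 6 /* NUM_MAC_INDEX_DRIVER + 2 */

int main(void)
{
    printf("v8 padding:  %d bytes\n", 4 - (NUM_MAC_INDEX % 4));     /* 3 */
    printf("cdb padding: %d bytes\n", 4 - (NUM_MAC_INDEX_CDB % 4)); /* 2 */
    return 0;
}
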
@@ -672,8 +672,7 @@ struct iwl_mac_beacon_cmd_v6 {
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */

/**
 * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
 * @tx: the tx commands associated with the beacon frame
 * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA
 * @template_id: currently equal to the mac context id of the coresponding
 *    mac.
 * @tim_idx: the offset of the tim IE in the beacon

@@ -682,16 +681,38 @@ struct iwl_mac_beacon_cmd_v6 {
 * @csa_offset: offset to the CSA IE if present
 * @frame: the template of the beacon frame
 */
struct iwl_mac_beacon_cmd {
    struct iwl_tx_cmd tx;
struct iwl_mac_beacon_cmd_data {
    __le32 template_id;
    __le32 tim_idx;
    __le32 tim_size;
    __le32 ecsa_offset;
    __le32 csa_offset;
    struct ieee80211_hdr frame[0];
};

/**
 * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
 * @tx: the tx commands associated with the beacon frame
 * @data: see &iwl_mac_beacon_cmd_data
 */
struct iwl_mac_beacon_cmd_v7 {
    struct iwl_tx_cmd tx;
    struct iwl_mac_beacon_cmd_data data;
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */

/**
 * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
 * @byte_cnt: byte count of the beacon frame
 * @flags: for future use
 * @data: see &iwl_mac_beacon_cmd_data
 */
struct iwl_mac_beacon_cmd {
    __le16 byte_cnt;
    __le16 flags;
    __le64 reserved;
    struct iwl_mac_beacon_cmd_data data;
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */

struct iwl_beacon_notif {
    struct iwl_mvm_tx_resp beacon_notify_hdr;
    __le64 tsf;

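An illustrative reduction (simplified types, not the driver's) of what this refactor buys: the CSA/TIM fields now live in one shared struct, so the v7 wrapper, which still carries a TX command, and the new byte-count-based command cannot drift apart:

#include <stdint.h>

struct beacon_data {            /* stands in for iwl_mac_beacon_cmd_data */
    uint32_t template_id, tim_idx, tim_size, ecsa_offset, csa_offset;
};

struct beacon_cmd_v7 {          /* wraps the shared data after a TX command */
    /* struct iwl_tx_cmd tx; */
    struct beacon_data data;
};

struct beacon_cmd_v8 {          /* new: byte count + flags instead of TX cmd */
    uint16_t byte_cnt, flags;
    uint64_t reserved;
    struct beacon_data data;
};

int main(void) { return 0; }
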
@@ -341,6 +341,10 @@ enum iwl_prot_offload_subcmd_ids {
    STORED_BEACON_NTF = 0xFF,
};

enum iwl_regulatory_and_nvm_subcmd_ids {
    NVM_ACCESS_COMPLETE = 0x0,
};

enum iwl_fmac_debug_cmds {
    LMAC_RD_WR = 0x0,
    UMAC_RD_WR = 0x1,

@@ -355,6 +359,7 @@ enum {
    PHY_OPS_GROUP = 0x4,
    DATA_PATH_GROUP = 0x5,
    PROT_OFFLOAD_GROUP = 0xb,
    REGULATORY_AND_NVM_GROUP = 0xc,
    DEBUG_GROUP = 0xf,
};

@@ -593,60 +598,7 @@ enum {

#define IWL_ALIVE_FLG_RFKILL BIT(0)

struct mvm_alive_resp_ver1 {
    __le16 status;
    __le16 flags;
    u8 ucode_minor;
    u8 ucode_major;
    __le16 id;
    u8 api_minor;
    u8 api_major;
    u8 ver_subtype;
    u8 ver_type;
    u8 mac;
    u8 opt;
    __le16 reserved2;
    __le32 timestamp;
    __le32 error_event_table_ptr; /* SRAM address for error log */
    __le32 log_event_table_ptr; /* SRAM address for event log */
    __le32 cpu_register_ptr;
    __le32 dbgm_config_ptr;
    __le32 alive_counter_ptr;
    __le32 scd_base_ptr; /* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */

struct mvm_alive_resp_ver2 {
    __le16 status;
    __le16 flags;
    u8 ucode_minor;
    u8 ucode_major;
    __le16 id;
    u8 api_minor;
    u8 api_major;
    u8 ver_subtype;
    u8 ver_type;
    u8 mac;
    u8 opt;
    __le16 reserved2;
    __le32 timestamp;
    __le32 error_event_table_ptr; /* SRAM address for error log */
    __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
    __le32 cpu_register_ptr;
    __le32 dbgm_config_ptr;
    __le32 alive_counter_ptr;
    __le32 scd_base_ptr; /* SRAM address for SCD */
    __le32 st_fwrd_addr; /* pointer to Store and forward */
    __le32 st_fwrd_size;
    u8 umac_minor; /* UMAC version: minor */
    u8 umac_major; /* UMAC version: major */
    __le16 umac_id; /* UMAC version: id */
    __le32 error_info_addr; /* SRAM address for UMAC error log */
    __le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_2 */

struct mvm_alive_resp {
    __le16 status;
    __le16 flags;
struct iwl_lmac_alive {
    __le32 ucode_minor;
    __le32 ucode_major;
    u8 ver_subtype;

@@ -662,12 +614,29 @@ struct mvm_alive_resp {
    __le32 scd_base_ptr; /* SRAM address for SCD */
    __le32 st_fwrd_addr; /* pointer to Store and forward */
    __le32 st_fwrd_size;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */

struct iwl_umac_alive {
    __le32 umac_minor; /* UMAC version: minor */
    __le32 umac_major; /* UMAC version: major */
    __le32 error_info_addr; /* SRAM address for UMAC error log */
    __le32 dbg_print_buff_addr;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */

struct mvm_alive_resp_v3 {
    __le16 status;
    __le16 flags;
    struct iwl_lmac_alive lmac_data;
    struct iwl_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_3 */

struct mvm_alive_resp {
    __le16 status;
    __le16 flags;
    struct iwl_lmac_alive lmac_data[2];
    struct iwl_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_4 */

/* Error response/notification */
enum {
    FW_ERR_UNKNOWN_CMD = 0x0,

@@ -708,7 +677,6 @@ struct iwl_error_resp {
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
#define AUX_BINDING_INDEX (3)
#define MAX_PHYS (4)

/* Used to extract ID and color from the context dword */
#define FW_CTXT_ID_POS (0)

@@ -1251,13 +1219,16 @@ struct iwl_missed_beacons_notif {
 * @external_ver: external image version
 * @status: MFUART loading status
 * @duration: MFUART loading time
 * @image_size: MFUART image size in bytes
 */
struct iwl_mfuart_load_notif {
    __le32 installed_ver;
    __le32 external_ver;
    __le32 status;
    __le32 duration;
} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
    /* image size valid only in v2 of the command */
    __le32 image_size;
} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/

/**
 * struct iwl_set_calib_default_cmd - set default value for calibration.

@@ -2200,4 +2171,11 @@ struct iwl_dbg_mem_access_rsp {
    __le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */

/**
 * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
 */
struct iwl_nvm_access_complete_cmd {
    __le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */

#endif /* __fw_api_h__ */

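A sketch of the pattern the driver uses to tell such API versions apart at runtime (see iwl_alive_fn() and iwl_mvm_rx_mfuart_notif() later in this series): since each version only appends fields, the payload length identifies the version. Simplified stand-in structs, not the real ones:

#include <stddef.h>
#include <stdint.h>

struct lmac { uint32_t error_event_table_ptr; };
struct alive_v3 { struct lmac lmac_data;    };
struct alive_v4 { struct lmac lmac_data[2]; }; /* CDB: two LMACs */

static void handle_alive(const void *payload, size_t len, uint32_t err_tbl[2])
{
    if (len == sizeof(struct alive_v4)) {
        const struct alive_v4 *a = payload;

        err_tbl[0] = a->lmac_data[0].error_event_table_ptr;
        err_tbl[1] = a->lmac_data[1].error_event_table_ptr;
    } else {
        const struct alive_v3 *a = payload;

        err_tbl[0] = a->lmac_data.error_event_table_ptr;
    }
}

int main(void)
{
    struct alive_v4 a = { { { 0x800000 }, { 0x900000 } } };
    uint32_t tbl[2] = { 0, 0 };

    handle_alive(&a, sizeof(a), tbl);
    return tbl[0] == 0x800000 && tbl[1] == 0x900000 ? 0 : 1;
}
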
@@ -811,12 +811,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
            struct iwl_fw_error_dump_paging *paging;
            struct page *pages =
                mvm->fw_paging_db[i].fw_paging_block;
            dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys;

            dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
            dump_data->len = cpu_to_le32(sizeof(*paging) +
                                         PAGING_BLOCK_SIZE);
            paging = (void *)dump_data->data;
            paging->index = cpu_to_le32(i);
            dma_sync_single_for_cpu(mvm->trans->dev, addr,
                                    PAGING_BLOCK_SIZE,
                                    DMA_BIDIRECTIONAL);
            memcpy(paging->data, page_address(pages),
                   PAGING_BLOCK_SIZE);
            dump_data = iwl_fw_error_next_data(dump_data);

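The rule this hunk and the paging hunks below enforce, in isolation (a kernel-style sketch under the assumption of a streaming DMA mapping; not the driver's exact code): sync the buffer for the CPU before reading what the device may have written, and for the device after the CPU has written it:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* CPU reads a device-written block, e.g. for an error dump. */
static void read_dma_block(struct device *dev, dma_addr_t addr,
                           void *dst, const void *src, size_t size)
{
    dma_sync_single_for_cpu(dev, addr, size, DMA_BIDIRECTIONAL);
    memcpy(dst, src, size);
}

/* CPU fills a block the device will read, e.g. a paging block. */
static void write_dma_block(struct device *dev, dma_addr_t addr,
                            void *dst, const void *src, size_t size)
{
    memcpy(dst, src, size);
    dma_sync_single_for_device(dev, addr, size, DMA_BIDIRECTIONAL);
}
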
@@ -214,6 +214,10 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
    memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
           image->sec[sec_idx].data,
           mvm->fw_paging_db[0].fw_paging_size);
    dma_sync_single_for_device(mvm->trans->dev,
                               mvm->fw_paging_db[0].fw_paging_phys,
                               mvm->fw_paging_db[0].fw_paging_size,
                               DMA_BIDIRECTIONAL);

    IWL_DEBUG_FW(mvm,
                 "Paging: copied %d CSS bytes to first block\n",

@@ -228,9 +232,16 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
     * loop stop at num_of_paging_blk since that last block is not full.
     */
    for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
        memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
        struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

        memcpy(page_address(block->fw_paging_block),
               image->sec[sec_idx].data + offset,
               mvm->fw_paging_db[idx].fw_paging_size);
               block->fw_paging_size);
        dma_sync_single_for_device(mvm->trans->dev,
                                   block->fw_paging_phys,
                                   block->fw_paging_size,
                                   DMA_BIDIRECTIONAL);

        IWL_DEBUG_FW(mvm,
                     "Paging: copied %d paging bytes to block %d\n",

@@ -242,9 +253,15 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)

    /* copy the last paging block */
    if (mvm->num_of_pages_in_last_blk > 0) {
        memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
        struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

        memcpy(page_address(block->fw_paging_block),
               image->sec[sec_idx].data + offset,
               FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
        dma_sync_single_for_device(mvm->trans->dev,
                                   block->fw_paging_phys,
                                   block->fw_paging_size,
                                   DMA_BIDIRECTIONAL);

        IWL_DEBUG_FW(mvm,
                     "Paging: copied %d pages in the last block %d\n",

@@ -444,81 +461,61 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
    struct iwl_mvm *mvm =
        container_of(notif_wait, struct iwl_mvm, notif_wait);
    struct iwl_mvm_alive_data *alive_data = data;
    struct mvm_alive_resp_ver1 *palive1;
    struct mvm_alive_resp_ver2 *palive2;
    struct mvm_alive_resp_v3 *palive3;
    struct mvm_alive_resp *palive;
    struct iwl_umac_alive *umac;
    struct iwl_lmac_alive *lmac1;
    struct iwl_lmac_alive *lmac2 = NULL;
    u16 status;

    if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
        palive1 = (void *)pkt->data;

        mvm->support_umac_log = false;
        mvm->error_event_table =
            le32_to_cpu(palive1->error_event_table_ptr);
        mvm->log_event_table =
            le32_to_cpu(palive1->log_event_table_ptr);
        alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

        alive_data->valid = le16_to_cpu(palive1->status) ==
                            IWL_ALIVE_STATUS_OK;
        IWL_DEBUG_FW(mvm,
                     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                     le16_to_cpu(palive1->status), palive1->ver_type,
                     palive1->ver_subtype, palive1->flags);
    } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
        palive2 = (void *)pkt->data;

        mvm->error_event_table =
            le32_to_cpu(palive2->error_event_table_ptr);
        mvm->log_event_table =
            le32_to_cpu(palive2->log_event_table_ptr);
        alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
        mvm->umac_error_event_table =
            le32_to_cpu(palive2->error_info_addr);
        mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
        mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

        alive_data->valid = le16_to_cpu(palive2->status) ==
                            IWL_ALIVE_STATUS_OK;
        if (mvm->umac_error_event_table)
            mvm->support_umac_log = true;

        IWL_DEBUG_FW(mvm,
                     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                     le16_to_cpu(palive2->status), palive2->ver_type,
                     palive2->ver_subtype, palive2->flags);

        IWL_DEBUG_FW(mvm,
                     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                     palive2->umac_major, palive2->umac_minor);
    } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
    if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
        palive = (void *)pkt->data;

        mvm->error_event_table =
            le32_to_cpu(palive->error_event_table_ptr);
        mvm->log_event_table =
            le32_to_cpu(palive->log_event_table_ptr);
        alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
        mvm->umac_error_event_table =
            le32_to_cpu(palive->error_info_addr);
        mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
        mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

        alive_data->valid = le16_to_cpu(palive->status) ==
                            IWL_ALIVE_STATUS_OK;
        if (mvm->umac_error_event_table)
            mvm->support_umac_log = true;

        IWL_DEBUG_FW(mvm,
                     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                     le16_to_cpu(palive->status), palive->ver_type,
                     palive->ver_subtype, palive->flags);

        IWL_DEBUG_FW(mvm,
                     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                     le32_to_cpu(palive->umac_major),
                     le32_to_cpu(palive->umac_minor));
        umac = &palive->umac_data;
        lmac1 = &palive->lmac_data[0];
        lmac2 = &palive->lmac_data[1];
        status = le16_to_cpu(palive->status);
    } else {
        palive3 = (void *)pkt->data;
        umac = &palive3->umac_data;
        lmac1 = &palive3->lmac_data;
        status = le16_to_cpu(palive3->status);
    }

    mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
    if (lmac2)
        mvm->error_event_table[1] =
            le32_to_cpu(lmac2->error_event_table_ptr);
    mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
    mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
    mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);

    mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);

    alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
    alive_data->valid = status == IWL_ALIVE_STATUS_OK;
    if (mvm->umac_error_event_table)
        mvm->support_umac_log = true;

    IWL_DEBUG_FW(mvm,
                 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
                 status, lmac1->ver_type, lmac1->ver_subtype);

    if (lmac2)
        IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

    IWL_DEBUG_FW(mvm,
                 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                 le32_to_cpu(umac->umac_major),
                 le32_to_cpu(umac->umac_minor));

    return true;
}

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
                                   struct iwl_rx_packet *pkt, void *data)
{
    WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

    return true;
}

@@ -537,6 +534,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
    return false;
}

static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
{
    const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
    int ret;

    /*
     * Configure and operate fw paging mechanism.
     * The driver configures the paging flow only once.
     * The CPU2 paging image is included in the IWL_UCODE_INIT image.
     */
    if (!fw->paging_mem_size)
        return 0;

    /*
     * When dma is not enabled, the driver needs to copy / write
     * the downloaded / uploaded page to / from the smem.
     * This gets the location of the place were the pages are
     * stored.
     */
    if (!is_device_dma_capable(mvm->trans->dev)) {
        ret = iwl_trans_get_paging_item(mvm);
        if (ret) {
            IWL_ERR(mvm, "failed to get FW paging item\n");
            return ret;
        }
    }

    ret = iwl_save_fw_paging(mvm, fw);
    if (ret) {
        IWL_ERR(mvm, "failed to save the FW paging image\n");
        return ret;
    }

    ret = iwl_send_paging_cmd(mvm, fw);
    if (ret) {
        IWL_ERR(mvm, "failed to send the paging cmd\n");
        iwl_free_fw_paging(mvm);
        return ret;
    }

    return 0;
}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
{

@@ -607,40 +646,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,

    iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

    /*
     * configure and operate fw paging mechanism.
     * driver configures the paging flow only once, CPU2 paging image
     * included in the IWL_UCODE_INIT image.
     */
    if (fw->paging_mem_size) {
        /*
         * When dma is not enabled, the driver needs to copy / write
         * the downloaded / uploaded page to / from the smem.
         * This gets the location of the place were the pages are
         * stored.
         */
        if (!is_device_dma_capable(mvm->trans->dev)) {
            ret = iwl_trans_get_paging_item(mvm);
            if (ret) {
                IWL_ERR(mvm, "failed to get FW paging item\n");
                return ret;
            }
        }

        ret = iwl_save_fw_paging(mvm, fw);
        if (ret) {
            IWL_ERR(mvm, "failed to save the FW paging image\n");
            return ret;
        }

        ret = iwl_send_paging_cmd(mvm, fw);
        if (ret) {
            IWL_ERR(mvm, "failed to send the paging cmd\n");
            iwl_free_fw_paging(mvm);
            return ret;
        }
    }

    /*
     * Note: all the queues are enabled as part of the interface
     * initialization, but in firmware restart scenarios they

@@ -798,6 +803,75 @@ out:
    return ret;
}

int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
    struct iwl_notification_wait init_wait;
    struct iwl_nvm_access_complete_cmd nvm_complete = {};
    static const u16 init_complete[] = {
        INIT_COMPLETE_NOTIF,
    };
    int ret;

    lockdep_assert_held(&mvm->mutex);

    iwl_init_notification_wait(&mvm->notif_wait,
                               &init_wait,
                               init_complete,
                               ARRAY_SIZE(init_complete),
                               iwl_wait_init_complete,
                               NULL);

    /* Will also start the device */
    ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
    if (ret) {
        IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
        goto error;
    }

    /* TODO: remove when integrating context info */
    ret = iwl_mvm_init_paging(mvm);
    if (ret) {
        IWL_ERR(mvm, "Failed to init paging: %d\n",
                ret);
        goto error;
    }

    /* Read the NVM only at driver load time, no need to do this twice */
    if (read_nvm) {
        /* Read nvm */
        ret = iwl_nvm_init(mvm, true);
        if (ret) {
            IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
            goto error;
        }
    }

    /* In case we read the NVM from external file, load it to the NIC */
    if (mvm->nvm_file_name)
        iwl_mvm_load_nvm_to_nic(mvm);

    ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
    if (WARN_ON(ret))
        goto error;

    ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                            NVM_ACCESS_COMPLETE), 0,
                               sizeof(nvm_complete), &nvm_complete);
    if (ret) {
        IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
                ret);
        goto error;
    }

    /* We wait for the INIT complete notification */
    return iwl_wait_notification(&mvm->notif_wait, &init_wait,
                                 MVM_UCODE_ALIVE_TIMEOUT);

error:
    iwl_remove_notification(&mvm->notif_wait, &init_wait);
    return ret;
}

static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
                                          struct iwl_rx_packet *pkt)
{

@@ -1058,6 +1132,43 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
    return ret;
}

static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
    int ret;

    if (iwl_mvm_has_new_tx_api(mvm))
        return iwl_run_unified_mvm_ucode(mvm, false);

    ret = iwl_run_init_mvm_ucode(mvm, false);

    if (iwlmvm_mod_params.init_dbg)
        return 0;

    if (ret) {
        IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
        /* this can't happen */
        if (WARN_ON(ret > 0))
            ret = -ERFKILL;
        return ret;
    }

    /*
     * Stop and start the transport without entering low power
     * mode. This will save the state of other components on the
     * device that are triggered by the INIT firwmare (MFUART).
     */
    _iwl_trans_stop_device(mvm->trans, false);
    ret = _iwl_trans_start_hw(mvm->trans, false);
    if (ret)
        return ret;

    ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
    if (ret)
        return ret;

    return iwl_mvm_init_paging(mvm);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
    int ret, i;

@@ -1070,35 +1181,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
    if (ret)
        return ret;

    /*
     * If we haven't completed the run of the init ucode during
     * module loading, load init ucode now
     * (for example, if we were in RFKILL)
     */
    ret = iwl_run_init_mvm_ucode(mvm, false);

    if (iwlmvm_mod_params.init_dbg)
        return 0;

    if (ret) {
        IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
        /* this can't happen */
        if (WARN_ON(ret > 0))
            ret = -ERFKILL;
        goto error;
    }

    /*
     * Stop and start the transport without entering low power
     * mode. This will save the state of other components on the
     * device that are triggered by the INIT firwmare (MFUART).
     */
    _iwl_trans_stop_device(mvm->trans, false);
    ret = _iwl_trans_start_hw(mvm->trans, false);
    if (ret)
        goto error;

    ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
    ret = iwl_mvm_load_rt_fw(mvm);
    if (ret) {
        IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
        goto error;

@@ -1125,13 +1208,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        goto error;

    /* Send phy db control command and then phy db calibration*/
    ret = iwl_send_phy_db_data(mvm->phy_db);
    if (ret)
        goto error;
    if (!iwl_mvm_has_new_tx_api(mvm)) {
        ret = iwl_send_phy_db_data(mvm->phy_db);
        if (ret)
            goto error;

    ret = iwl_send_phy_cfg_cmd(mvm);
    if (ret)
        goto error;
        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret)
            goto error;
    }

    /* Init RSS configuration */
    if (iwl_mvm_has_new_rx_api(mvm)) {

@@ -1311,10 +1396,19 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
    struct iwl_rx_packet *pkt = rxb_addr(rxb);
    struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

    IWL_DEBUG_INFO(mvm,
                   "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
                   le32_to_cpu(mfuart_notif->installed_ver),
                   le32_to_cpu(mfuart_notif->external_ver),
                   le32_to_cpu(mfuart_notif->status),
                   le32_to_cpu(mfuart_notif->duration));
    if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
        IWL_DEBUG_INFO(mvm,
                       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x, image size: 0x%08x\n",
                       le32_to_cpu(mfuart_notif->installed_ver),
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration),
                       le32_to_cpu(mfuart_notif->image_size));
    else
        IWL_DEBUG_INFO(mvm,
                       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
                       le32_to_cpu(mfuart_notif->installed_ver),
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration));
}

@@ -531,38 +531,26 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

    lockdep_assert_held(&mvm->mutex);

    /*
     * If DQA is supported - queues were already disabled, since in
     * DQA-mode the queues are a property of the STA and not of the
     * vif, and at this point the STA was already deleted
     */
    if (iwl_mvm_is_dqa_supported(mvm))
        return;

    switch (vif->type) {
    case NL80211_IFTYPE_P2P_DEVICE:
        if (!iwl_mvm_is_dqa_supported(mvm))
            iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
                                IWL_MVM_OFFCHANNEL_QUEUE,
                                IWL_MAX_TID_COUNT, 0);
        else
            iwl_mvm_disable_txq(mvm,
                                IWL_MVM_DQA_P2P_DEVICE_QUEUE,
                                vif->hw_queue[0], IWL_MAX_TID_COUNT,
                                0);
        iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
                            IWL_MVM_OFFCHANNEL_QUEUE,
                            IWL_MAX_TID_COUNT, 0);

        break;
    case NL80211_IFTYPE_AP:
        iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
                            IWL_MAX_TID_COUNT, 0);

        if (iwl_mvm_is_dqa_supported(mvm))
            iwl_mvm_disable_txq(mvm,
                                IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
                                vif->hw_queue[0], IWL_MAX_TID_COUNT,
                                0);
        /* fall through */
    default:
        /*
         * If DQA is supported - queues were already disabled, since in
         * DQA-mode the queues are a property of the STA and not of the
         * vif, and at this point the STA was already deleted
         */
        if (iwl_mvm_is_dqa_supported(mvm))
            break;

        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
            iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
                                vif->hw_queue[ac],

@@ -991,7 +979,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
}

static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
                                     struct iwl_mac_beacon_cmd_v6 *beacon_cmd,
                                     __le32 *tim_index, __le32 *tim_size,
                                     u8 *beacon, u32 frame_size)
{
    u32 tim_idx;

@@ -1008,8 +996,8 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,

    /* If TIM field was found, set variables */
    if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
        beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
        beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
        *tim_index = cpu_to_le32(tim_idx);
        *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
    } else {
        IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
    }

@@ -1043,8 +1031,9 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
    };
    union {
        struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
        struct iwl_mac_beacon_cmd beacon_cmd;
        struct iwl_mac_beacon_cmd_v7 beacon_cmd;
    } u = {};
    struct iwl_mac_beacon_cmd beacon_cmd;
    struct ieee80211_tx_info *info;
    u32 beacon_skb_len;
    u32 rate, tx_flags;

@@ -1054,6 +1043,46 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,

    beacon_skb_len = beacon->len;

    if (fw_has_capa(&mvm->fw->ucode_capa,
                    IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
        u32 csa_offset, ecsa_offset;

        csa_offset = iwl_mvm_find_ie_offset(beacon->data,
                                            WLAN_EID_CHANNEL_SWITCH,
                                            beacon_skb_len);
        ecsa_offset =
            iwl_mvm_find_ie_offset(beacon->data,
                                   WLAN_EID_EXT_CHANSWITCH_ANN,
                                   beacon_skb_len);

        if (iwl_mvm_has_new_tx_api(mvm)) {
            beacon_cmd.data.template_id =
                cpu_to_le32((u32)mvmvif->id);
            beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset);
            beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
            beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len);
            if (vif->type == NL80211_IFTYPE_AP)
                iwl_mvm_mac_ctxt_set_tim(mvm,
                                         &beacon_cmd.data.tim_idx,
                                         &beacon_cmd.data.tim_size,
                                         beacon->data,
                                         beacon_skb_len);
            cmd.len[0] = sizeof(beacon_cmd);
            cmd.data[0] = &beacon_cmd;
            goto send;

        } else {
            u.beacon_cmd.data.ecsa_offset =
                cpu_to_le32(ecsa_offset);
            u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
            cmd.len[0] = sizeof(u.beacon_cmd);
            cmd.data[0] = &u;
        }
    } else {
        cmd.len[0] = sizeof(u.beacon_cmd_v6);
        cmd.data[0] = &u;
    }

    /* TODO: for now the beacon template id is set to be the mac context id.
     * Might be better to handle it as another resource ... */
    u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id);

@@ -1092,29 +1121,13 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,

    /* Set up TX beacon command fields */
    if (vif->type == NL80211_IFTYPE_AP)
        iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6,
        iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx,
                                 &u.beacon_cmd_v6.tim_size,
                                 beacon->data,
                                 beacon_skb_len);

send:
    /* Submit command */

    if (fw_has_capa(&mvm->fw->ucode_capa,
                    IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
        u.beacon_cmd.csa_offset =
            cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
                                               WLAN_EID_CHANNEL_SWITCH,
                                               beacon_skb_len));
        u.beacon_cmd.ecsa_offset =
            cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
                                               WLAN_EID_EXT_CHANSWITCH_ANN,
                                               beacon_skb_len));

        cmd.len[0] = sizeof(u.beacon_cmd);
    } else {
        cmd.len[0] = sizeof(u.beacon_cmd_v6);
    }

    cmd.data[0] = &u;
    cmd.dataflags[0] = 0;
    cmd.len[1] = beacon_skb_len;
    cmd.data[1] = beacon->data;

@@ -1210,8 +1210,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
    /* the fw is stopped, the aux sta is dead: clean up driver state */
    iwl_mvm_del_aux_sta(mvm);

    iwl_free_fw_paging(mvm);

    /*
     * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
     * won't be called in this case).

@@ -2106,22 +2104,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
    if (ret)
        goto out_unbind;

    /* enable the multicast queue, now that we have a station for it */
    if (iwl_mvm_is_dqa_supported(mvm)) {
        unsigned int wdg_timeout =
            iwl_mvm_get_wd_timeout(mvm, vif, false, false);
        struct iwl_trans_txq_scd_cfg cfg = {
            .fifo = IWL_MVM_TX_FIFO_MCAST,
            .sta_id = mvmvif->bcast_sta.sta_id,
            .tid = IWL_MAX_TID_COUNT,
            .aggregate = false,
            .frame_limit = IWL_FRAME_LIMIT,
        };

        iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                           &cfg, wdg_timeout);
    }

    /* must be set before quota calculations */
    mvmvif->ap_ibss_active = true;

@@ -2554,6 +2536,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
{
    struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    int ret;

    IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",

@@ -2582,8 +2565,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
    if (old_state == IEEE80211_STA_NONE &&
        new_state == IEEE80211_STA_NOTEXIST &&
        iwl_mvm_is_dqa_supported(mvm)) {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

        iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
        flush_work(&mvm->add_stream_wk);

@@ -2594,6 +2575,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
    }

    mutex_lock(&mvm->mutex);
    /* track whether or not the station is associated */
    mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC;

    if (old_state == IEEE80211_STA_NOTEXIST &&
        new_state == IEEE80211_STA_NONE) {
        /*

@@ -739,8 +739,9 @@ struct iwl_mvm {

    enum iwl_ucode_type cur_ucode;
    bool ucode_loaded;
    bool hw_registered;
    bool calibrating;
    u32 error_event_table;
    u32 error_event_table[2];
    u32 log_event_table;
    u32 umac_error_event_table;
    bool support_umac_log;

@@ -1217,6 +1218,19 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
    return mvm->trans->cfg->use_tfh;
}

static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
{
    /*
     * TODO:
     * The issue of how to determine CDB support is still not well defined.
     * It may be that it will be for all next HW devices and it may be per
     * FW compilation and it may also differ between different devices.
     * For now take a ride on the new TX API and get back to it when
     * it is well defined.
     */
    return iwl_mvm_has_new_tx_api(mvm);
}

static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
{
#ifdef CONFIG_THERMAL

@@ -1257,6 +1271,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
 ******************/
/* uCode */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);

/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,

@@ -1686,6 +1701,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,

static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
    iwl_free_fw_paging(mvm);
    mvm->ucode_loaded = false;
    iwl_trans_stop_device(mvm->trans);
}

@@ -466,6 +466,13 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
    HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
    HCMD_NAME(NVM_ACCESS_COMPLETE),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
    [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
    [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),

@@ -474,6 +481,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
    [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
    [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
    [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
    [REGULATORY_AND_NVM_GROUP] =
        HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};

/* this forward declaration can avoid to export the function */

@@ -597,7 +606,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
    }
    mvm->sf_state = SF_UNINIT;
    mvm->cur_ucode = IWL_UCODE_INIT;
    if (iwl_mvm_has_new_tx_api(mvm))
        mvm->cur_ucode = IWL_UCODE_REGULAR;
    else
        mvm->cur_ucode = IWL_UCODE_INIT;
    mvm->drop_bcn_ap_mode = true;

    mutex_init(&mvm->mutex);

@@ -720,7 +732,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,

    mutex_lock(&mvm->mutex);
    iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
    err = iwl_run_init_mvm_ucode(mvm, true);
    if (iwl_mvm_has_new_tx_api(mvm))
        err = iwl_run_unified_mvm_ucode(mvm, true);
    else
        err = iwl_run_init_mvm_ucode(mvm, true);
    if (!err || !iwlmvm_mod_params.init_dbg)
        iwl_mvm_stop_device(mvm);
    iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);

@@ -743,6 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
    err = iwl_mvm_mac_setup_register(mvm);
    if (err)
        goto out_free;
    mvm->hw_registered = true;

    min_backoff = calc_min_backoff(trans, cfg);
    iwl_mvm_thermal_initialize(mvm, min_backoff);

@@ -764,6 +780,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,

 out_unregister:
    ieee80211_unregister_hw(mvm->hw);
    mvm->hw_registered = false;
    iwl_mvm_leds_exit(mvm);
    iwl_mvm_thermal_exit(mvm);
 out_free:

@@ -1192,7 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
        reprobe->dev = mvm->trans->dev;
        INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
        schedule_work(&reprobe->work);
    } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
    } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
               mvm->hw_registered) {
        /* don't let the transport/FW power down */
        iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

@@ -174,6 +174,14 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
    enum ieee80211_ac_numbers ac;
    bool tid_found = false;

#ifdef CONFIG_IWLWIFI_DEBUGFS
    /* set advanced pm flag with no uapsd ACs to enable ps-poll */
    if (mvmvif->dbgfs_pm.use_ps_poll) {
        cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
        return;
    }
#endif

    for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
        if (!mvmvif->queue_params[ac].uapsd)
            continue;

@@ -204,16 +212,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
        }
    }

    if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
        /* set advanced pm flag with no uapsd ACs to enable ps-poll */
        if (mvmvif->dbgfs_pm.use_ps_poll)
            cmd->flags |=
                cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
#endif
        return;
    }

    cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);

    if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |

@@ -601,9 +599,8 @@ static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    bool *disable_ps = _data;

    if (mvmvif->phy_ctxt)
        if (mvmvif->phy_ctxt->id < MAX_PHYS)
            *disable_ps |= mvmvif->ps_disabled;
    if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX)
        *disable_ps |= mvmvif->ps_disabled;
}

static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,

@@ -611,6 +608,7 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_power_vifs *power_iterator = _data;
    bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;

    switch (ieee80211_vif_type_p2p(vif)) {
    case NL80211_IFTYPE_P2P_DEVICE:

@@ -621,34 +619,30 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
        /* only a single MAC of the same type */
        WARN_ON(power_iterator->ap_vif);
        power_iterator->ap_vif = vif;
        if (mvmvif->phy_ctxt)
            if (mvmvif->phy_ctxt->id < MAX_PHYS)
                power_iterator->ap_active = true;
        if (active)
            power_iterator->ap_active = true;
        break;

    case NL80211_IFTYPE_MONITOR:
        /* only a single MAC of the same type */
        WARN_ON(power_iterator->monitor_vif);
        power_iterator->monitor_vif = vif;
        if (mvmvif->phy_ctxt)
            if (mvmvif->phy_ctxt->id < MAX_PHYS)
                power_iterator->monitor_active = true;
        if (active)
            power_iterator->monitor_active = true;
        break;

    case NL80211_IFTYPE_P2P_CLIENT:
        /* only a single MAC of the same type */
        WARN_ON(power_iterator->p2p_vif);
        power_iterator->p2p_vif = vif;
        if (mvmvif->phy_ctxt)
            if (mvmvif->phy_ctxt->id < MAX_PHYS)
                power_iterator->p2p_active = true;
        if (active)
            power_iterator->p2p_active = true;
        break;

    case NL80211_IFTYPE_STATION:
        power_iterator->bss_vif = vif;
        if (mvmvif->phy_ctxt)
            if (mvmvif->phy_ctxt->id < MAX_PHYS)
                power_iterator->bss_active = true;
        if (active)
            power_iterator->bss_active = true;
        break;

    default:

@@ -497,8 +497,7 @@ struct iwl_mvm_stat_data {
 	struct iwl_mvm *mvm;
 	__le32 mac_id;
 	u8 beacon_filter_average_energy;
-	struct mvm_statistics_general_v8 *general;
-	struct mvm_statistics_load *load;
+	void *general;
 };
 
 static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -518,10 +517,26 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
 	 * the notification directly.
 	 */
 	if (data->general) {
-		mvmvif->beacon_stats.num_beacons =
-			le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
-		mvmvif->beacon_stats.avg_signal =
-			-data->general->beacon_average_energy[mvmvif->id];
+		u16 vif_id = mvmvif->id;
+
+		if (iwl_mvm_is_cdb_supported(mvm)) {
+			struct mvm_statistics_general_cdb *general =
+				data->general;
+
+			mvmvif->beacon_stats.num_beacons =
+				le32_to_cpu(general->beacon_counter[vif_id]);
+			mvmvif->beacon_stats.avg_signal =
+				-general->beacon_average_energy[vif_id];
+		} else {
+			struct mvm_statistics_general_v8 *general =
+				data->general;
+
+			mvmvif->beacon_stats.num_beacons =
+				le32_to_cpu(general->beacon_counter[vif_id]);
+			mvmvif->beacon_stats.avg_signal =
+				-general->beacon_average_energy[vif_id];
+		}
+
 	}
 
 	if (mvmvif->id != id)
@@ -615,46 +630,65 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
 				  struct iwl_rx_packet *pkt)
 {
-	struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
+	struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
 	struct iwl_mvm_stat_data data = {
 		.mvm = mvm,
 	};
-	int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) :
-			    sizeof(struct iwl_notif_statistics_v10);
+	int expected_size;
+
+	if (iwl_mvm_is_cdb_supported(mvm))
+		expected_size = sizeof(*stats);
+	else if (iwl_mvm_has_new_rx_api(mvm))
+		expected_size = sizeof(struct iwl_notif_statistics_v11);
+	else
+		expected_size = sizeof(struct iwl_notif_statistics_v10);
 
 	if (iwl_rx_packet_payload_len(pkt) != expected_size)
 		goto invalid;
 
 	data.mac_id = stats->rx.general.mac_id;
 	data.beacon_filter_average_energy =
-		stats->general.beacon_filter_average_energy;
+		stats->general.common.beacon_filter_average_energy;
 
 	iwl_mvm_update_rx_statistics(mvm, &stats->rx);
 
-	mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
-	mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
+	mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
+	mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
 	mvm->radio_stats.on_time_rf =
-		le64_to_cpu(stats->general.on_time_rf);
+		le64_to_cpu(stats->general.common.on_time_rf);
 	mvm->radio_stats.on_time_scan =
-		le64_to_cpu(stats->general.on_time_scan);
+		le64_to_cpu(stats->general.common.on_time_scan);
 
 	data.general = &stats->general;
 	if (iwl_mvm_has_new_rx_api(mvm)) {
 		int i;
+		u8 *energy;
+		__le32 *bytes, *air_time;
+
+		if (!iwl_mvm_is_cdb_supported(mvm)) {
+			struct iwl_notif_statistics_v11 *v11 =
+				(void *)&pkt->data;
+
+			energy = (void *)&v11->load_stats.avg_energy;
+			bytes = (void *)&v11->load_stats.byte_count;
+			air_time = (void *)&v11->load_stats.air_time;
+		} else {
+			energy = (void *)&stats->load_stats.avg_energy;
+			bytes = (void *)&stats->load_stats.byte_count;
+			air_time = (void *)&stats->load_stats.air_time;
+		}
 
-		data.load = &stats->load_stats;
 		rcu_read_lock();
 		for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
 			struct iwl_mvm_sta *sta;
 
-			if (!data.load->avg_energy[i])
+			if (!energy[i])
 				continue;
 
 			sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
 			if (!sta)
 				continue;
-			sta->avg_energy = data.load->avg_energy[i];
+			sta->avg_energy = energy[i];
 		}
 		rcu_read_unlock();
 	}
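The statistics rework above is a version-dispatch pattern: the handler derives the expected notification size from firmware capability checks, and the shared fields sit in a `common` sub-struct so old and new layouts can be read through one pointer. A hedged userspace sketch of the idea; the struct names and fields here are hypothetical stand-ins, not the firmware API:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layouts: shared fields live in a common prefix, so any
 * version of the notification can be parsed through the same pointer. */
struct stats_common {
	uint32_t rx_time;
	uint32_t tx_time;
};

struct stats_v1 {
	struct stats_common common;
	uint32_t v1_only;
};

struct stats_v2 {
	struct stats_common common;
	uint32_t v2_only[2];
};

static void handle_stats(const void *pkt, size_t len, int has_v2)
{
	size_t expected = has_v2 ? sizeof(struct stats_v2)
				 : sizeof(struct stats_v1);
	const struct stats_common *common = pkt;

	if (len != expected) {	/* same size check the driver performs */
		fprintf(stderr, "bad stats size %zu\n", len);
		return;
	}
	printf("rx_time=%u tx_time=%u\n", common->rx_time, common->tx_time);
}

int main(void)
{
	struct stats_v2 s2 = { .common = { 123, 456 } };

	handle_stats(&s2, sizeof(s2), 1);
	return 0;
}
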
@@ -149,8 +149,17 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
 	unsigned int headlen, fraglen, pad_len = 0;
 	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
-	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
+	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
 		pad_len = 2;
+
+		/*
+		 * If the device inserted padding it means that (it thought)
+		 * the 802.11 header wasn't a multiple of 4 bytes long. In
+		 * this case, reserve two bytes at the start of the SKB to
+		 * align the payload properly in case we end up copying it.
+		 */
+		skb_reserve(skb, pad_len);
+	}
 	len -= pad_len;
 
 	/* If frame is small enough to fit in skb->head, pull it completely.
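The skb_reserve() added above exists purely for alignment: when the device pads an 802.11 header that is not a multiple of 4 bytes, shifting the start of the buffer by the same 2 bytes makes header-plus-padding end on a 4-byte boundary, so a copied payload lands aligned. A standalone sketch of the arithmetic, with no skb and an illustrative header length:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Backing storage; uint32_t guarantees 4-byte alignment. */
	uint32_t buf[32];
	unsigned char *head = (unsigned char *)buf;
	size_t hdrlen = 26;	/* e.g. a QoS header, not a multiple of 4 */
	size_t pad_len = (hdrlen & 3) ? 2 : 0;

	/* "Reserve" pad_len bytes up front, then place the header and the
	 * payload back to back, as the copy path does after stripping the
	 * device's padding. */
	unsigned char *hdr = head + pad_len;
	unsigned char *payload = hdr + hdrlen;

	printf("payload offset %% 4 = %zu\n",
	       (size_t)(payload - head) % 4);	/* prints 0 */
	return 0;
}
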
@@ -197,7 +197,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
 	int *global_cnt = data;
 
 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
-	    mvmvif->phy_ctxt->id < MAX_PHYS)
+	    mvmvif->phy_ctxt->id < NUM_PHY_CTX)
 		*global_cnt += 1;
 }
 
@@ -943,18 +943,92 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
 	return cpu_to_le32(rates);
 }
 
+static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
+				    struct iwl_scan_dwell *dwell,
+				    struct iwl_mvm_scan_timing_params *timing)
+{
+	dwell->active = timing->dwell_active;
+	dwell->passive = timing->dwell_passive;
+	dwell->fragmented = timing->dwell_fragmented;
+	dwell->extended = timing->dwell_extended;
+}
+
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
+{
+	struct ieee80211_supported_band *band;
+	int i, j = 0;
+
+	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+	for (i = 0; i < band->n_channels; i++, j++)
+		channels[j] = band->channels[i].hw_value;
+	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+	for (i = 0; i < band->n_channels; i++, j++)
+		channels[j] = band->channels[i].hw_value;
+}
+
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+				     u32 flags, u8 channel_flags)
+{
+	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+	struct iwl_scan_config *cfg = config;
+
+	cfg->flags = cpu_to_le32(flags);
+	cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+	cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+	cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+	cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
+	cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
+
+	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+
+	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+	cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+	cfg->channel_flags = channel_flags;
+
+	iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
+					 u32 flags, u8 channel_flags)
+{
+	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+	struct iwl_scan_config_cdb *cfg = config;
+
+	cfg->flags = cpu_to_le32(flags);
+	cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+	cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+	cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+	cfg->out_of_channel_time[0] =
+		cpu_to_le32(scan_timing[type].max_out_time);
+	cfg->out_of_channel_time[1] =
+		cpu_to_le32(scan_timing[type].max_out_time);
+	cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
+	cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
+
+	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+
+	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+	cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+	cfg->channel_flags = channel_flags;
+
+	iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
 int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 {
-	struct iwl_scan_config *scan_config;
-	struct ieee80211_supported_band *band;
-	int num_channels =
-		mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
-		mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
-	int ret, i, j = 0, cmd_size;
+	void *cfg;
+	int ret, cmd_size;
 	struct iwl_host_cmd cmd = {
 		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 	};
 	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+	int num_channels =
+		mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+		mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
+	u32 flags;
+	u8 channel_flags;
 
 	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
 		return -ENOBUFS;
@@ -965,52 +1039,45 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 		return 0;
 	}
 
-	cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
+	if (iwl_mvm_is_cdb_supported(mvm))
+		cmd_size = sizeof(struct iwl_scan_config_cdb);
+	else
+		cmd_size = sizeof(struct iwl_scan_config);
+	cmd_size += mvm->fw->ucode_capa.n_scan_channels;
 
-	scan_config = kzalloc(cmd_size, GFP_KERNEL);
-	if (!scan_config)
+	cfg = kzalloc(cmd_size, GFP_KERNEL);
+	if (!cfg)
 		return -ENOMEM;
 
-	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
-					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
-					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
-					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
-					 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
-					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
-					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
-					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
-					 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
-					 SCAN_CONFIG_N_CHANNELS(num_channels) |
-					 (type == IWL_SCAN_TYPE_FRAGMENTED ?
-					  SCAN_CONFIG_FLAG_SET_FRAGMENTED :
-					  SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED));
-	scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
-	scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
-	scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
-	scan_config->out_of_channel_time =
-		cpu_to_le32(scan_timing[type].max_out_time);
-	scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
-	scan_config->dwell_active = scan_timing[type].dwell_active;
-	scan_config->dwell_passive = scan_timing[type].dwell_passive;
-	scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented;
-	scan_config->dwell_extended = scan_timing[type].dwell_extended;
-
-	memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
-
-	scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
-	scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
-				     IWL_CHANNEL_FLAG_ACCURATE_EBS |
-				     IWL_CHANNEL_FLAG_EBS_ADD |
-				     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
-
-	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
-	for (i = 0; i < band->n_channels; i++, j++)
-		scan_config->channel_array[j] = band->channels[i].hw_value;
-	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
-	for (i = 0; i < band->n_channels; i++, j++)
-		scan_config->channel_array[j] = band->channels[i].hw_value;
+	flags = SCAN_CONFIG_FLAG_ACTIVATE |
+		SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+		SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+		SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+		SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+		SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+		SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+		SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+		SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+		SCAN_CONFIG_N_CHANNELS(num_channels) |
+		(type == IWL_SCAN_TYPE_FRAGMENTED ?
+		 SCAN_CONFIG_FLAG_SET_FRAGMENTED :
+		 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+	channel_flags = IWL_CHANNEL_FLAG_EBS |
+			IWL_CHANNEL_FLAG_ACCURATE_EBS |
+			IWL_CHANNEL_FLAG_EBS_ADD |
+			IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+	if (iwl_mvm_is_cdb_supported(mvm)) {
+		flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
+			 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
+			 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
+		iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
+	} else {
+		iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+	}
 
-	cmd.data[0] = scan_config;
+	cmd.data[0] = cfg;
 	cmd.len[0] = cmd_size;
 	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
 
@@ -1020,7 +1087,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 	if (!ret)
 		mvm->scan_type = type;
 
-	kfree(scan_config);
+	kfree(cfg);
 	return ret;
 }
 
@@ -1039,19 +1106,31 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_mvm_scan_params *params)
 {
+	struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+
 	if (params->measurement_dwell) {
 		cmd->active_dwell = params->measurement_dwell;
 		cmd->passive_dwell = params->measurement_dwell;
 		cmd->extended_dwell = params->measurement_dwell;
 	} else {
-		cmd->active_dwell = scan_timing[params->type].dwell_active;
-		cmd->passive_dwell = scan_timing[params->type].dwell_passive;
-		cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+		cmd->active_dwell = timing->dwell_active;
+		cmd->passive_dwell = timing->dwell_passive;
+		cmd->extended_dwell = timing->dwell_extended;
 	}
+	cmd->fragmented_dwell = timing->dwell_fragmented;
+
+	if (iwl_mvm_is_cdb_supported(mvm)) {
+		cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
+		cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+		cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
+		cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
+		cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+	} else {
+		cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
+		cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
+		cmd->no_cdb.scan_priority =
+			cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+	}
-	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
-	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
-	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
-	cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
 
 	if (iwl_mvm_is_regular_scan(params))
 		cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@@ -1063,9 +1142,8 @@ static void
 iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
 			       struct ieee80211_channel **channels,
 			       int n_channels, u32 ssid_bitmap,
-			       struct iwl_scan_req_umac *cmd)
+			       struct iwl_scan_channel_cfg_umac *channel_cfg)
 {
-	struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
 	int i;
 
 	for (i = 0; i < n_channels; i++) {
@@ -1088,8 +1166,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED) {
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+		if (iwl_mvm_is_cdb_supported(mvm))
+			flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
+	}
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
@@ -1126,11 +1207,14 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     int type)
 {
 	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
-	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
+	void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
+		(void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
+	struct iwl_scan_req_umac_tail *sec_part = cmd_data +
 		sizeof(struct iwl_scan_channel_cfg_umac) *
 		mvm->fw->ucode_capa.n_scan_channels;
 	int uid, i;
 	u32 ssid_bitmap = 0;
+	u8 channel_flags = 0;
 	struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
 
 	lockdep_assert_held(&mvm->mutex);
@@ -1157,16 +1241,23 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 
 	if (iwl_mvm_scan_use_ebs(mvm, vif))
-		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
-				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+		channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+				IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
-	cmd->n_channels = params->n_channels;
+	if (iwl_mvm_is_cdb_supported(mvm)) {
+		cmd->cdb.channel_flags = channel_flags;
+		cmd->cdb.n_channels = params->n_channels;
+	} else {
+		cmd->no_cdb.channel_flags = channel_flags;
+		cmd->no_cdb.n_channels = params->n_channels;
+	}
 
 	iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
 
 	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
-				       params->n_channels, ssid_bitmap, cmd);
+				       params->n_channels, ssid_bitmap,
+				       cmd_data);
 
 	for (i = 0; i < params->n_scan_plans; i++) {
 		struct cfg80211_sched_scan_plan *scan_plan =
@@ -1601,8 +1692,13 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 
 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
+	int base_size = IWL_SCAN_REQ_UMAC_SIZE;
+
+	if (iwl_mvm_is_cdb_supported(mvm))
+		base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
+
 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
-		return sizeof(struct iwl_scan_req_umac) +
+		return base_size +
 			sizeof(struct iwl_scan_channel_cfg_umac) *
 			mvm->fw->ucode_capa.n_scan_channels +
 			sizeof(struct iwl_scan_req_umac_tail);
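iwl_mvm_scan_size() above composes the UMAC scan request as a base header (CDB or legacy), one channel-config entry per supported channel, and a fixed tail. A sketch of that size computation; the sizes below are made up and merely stand in for the firmware API structs:

#include <stdio.h>
#include <stddef.h>

/* Illustrative sizes only; the real values come from the firmware API
 * structs (iwl_scan_req_umac, iwl_scan_channel_cfg_umac, ...). */
#define SCAN_REQ_UMAC_SIZE	48
#define SCAN_REQ_UMAC_SIZE_CDB	64
#define CHANNEL_CFG_SIZE	8
#define SCAN_REQ_TAIL_SIZE	112

static size_t scan_cmd_size(int cdb_supported, size_t n_scan_channels)
{
	size_t base = cdb_supported ? SCAN_REQ_UMAC_SIZE_CDB
				    : SCAN_REQ_UMAC_SIZE;

	/* base header + one channel config per supported channel + tail */
	return base + CHANNEL_CFG_SIZE * n_scan_channels + SCAN_REQ_TAIL_SIZE;
}

int main(void)
{
	printf("no-CDB, 50 channels: %zu bytes\n", scan_cmd_size(0, 50));
	printf("CDB,    50 channels: %zu bytes\n", scan_cmd_size(1, 50));
	return 0;
}
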
@@ -202,7 +202,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
 	add_sta_cmd.station_flags |=
 		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
-	add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+	if (mvm_sta->associated)
+		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
 
 	if (sta->wme) {
 		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
@@ -457,6 +458,52 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 	return disable_agg_tids;
 }
 
+static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+				       bool same_sta)
+{
+	struct iwl_mvm_sta *mvmsta;
+	u8 txq_curr_ac, sta_id, tid;
+	unsigned long disable_agg_tids = 0;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid = mvm->queue_info[queue].txq_tid;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+	/* Disable the queue */
+	if (disable_agg_tids)
+		iwl_mvm_invalidate_sta_queue(mvm, queue,
+					     disable_agg_tids, false);
+
+	ret = iwl_mvm_disable_txq(mvm, queue,
+				  mvmsta->vif->hw_queue[txq_curr_ac],
+				  tid, 0);
+	if (ret) {
+		/* Re-mark the inactive queue as inactive */
+		spin_lock_bh(&mvm->queue_info_lock);
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm,
+			"Failed to free inactive queue %d (ret=%d)\n",
+			queue, ret);
+
+		return ret;
+	}
+
+	/* If TXQ is allocated to another STA, update removal in FW */
+	if (!same_sta)
+		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
+
+	return 0;
+}
+
 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 				    unsigned long tfd_queue_mask, u8 ac)
 {
@@ -645,7 +692,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
 	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 	int queue = -1;
-	bool using_inactive_queue = false;
+	bool using_inactive_queue = false, same_sta = false;
 	unsigned long disable_agg_tids = 0;
 	enum iwl_mvm_agg_state queue_state;
 	bool shared_queue = false;
@@ -702,6 +749,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
 		using_inactive_queue = true;
+		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
 				    queue, mvmsta->sta_id, tid);
@@ -748,38 +796,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	 * first
 	 */
 	if (using_inactive_queue) {
-		u8 txq_curr_ac, sta_id;
-
-		spin_lock_bh(&mvm->queue_info_lock);
-		txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
-		sta_id = mvm->queue_info[queue].ra_sta_id;
-		spin_unlock_bh(&mvm->queue_info_lock);
-
-		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
-		/* Disable the queue */
-		if (disable_agg_tids)
-			iwl_mvm_invalidate_sta_queue(mvm, queue,
-						     disable_agg_tids, false);
-
-		ret = iwl_mvm_disable_txq(mvm, queue,
-					  mvmsta->vif->hw_queue[txq_curr_ac],
-					  tid, 0);
-		if (ret) {
-			IWL_ERR(mvm,
-				"Failed to free inactive queue %d (ret=%d)\n",
-				queue, ret);
-
-			/* Re-mark the inactive queue as inactive */
-			spin_lock_bh(&mvm->queue_info_lock);
-			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
-			spin_unlock_bh(&mvm->queue_info_lock);
-
+		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+		if (ret)
 			return ret;
-		}
-
-		/* If TXQ is allocated to another STA, update removal in FW */
-		if (sta_id != mvmsta->sta_id)
-			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
 	}
 
 	IWL_DEBUG_TX_QUEUES(mvm,
@@ -1095,6 +1114,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	int queue;
+	bool using_inactive_queue = false, same_sta = false;
 
 	/*
 	 * Check for inactive queues, so we don't reach a situation where we
@@ -1118,6 +1138,14 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 		spin_unlock_bh(&mvm->queue_info_lock);
 		IWL_ERR(mvm, "No available queues for new station\n");
 		return -ENOSPC;
+	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+		/*
+		 * If this queue is already allocated but inactive we'll need to
+		 * first free this queue before enabling it again, we'll mark
+		 * it as reserved to make sure no new traffic arrives on it
+		 */
+		using_inactive_queue = true;
+		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
 	}
 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
 
@@ -1125,6 +1153,9 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 
 	mvmsta->reserved_queue = queue;
 
+	if (using_inactive_queue)
+		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+
 	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
 			    queue, mvmsta->sta_id);
 
@@ -1470,6 +1501,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	u8 sta_id = mvm_sta->sta_id;
 	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -1478,7 +1510,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	kfree(mvm_sta->dup_data);
 
 	if ((vif->type == NL80211_IFTYPE_STATION &&
-	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
+	     mvmvif->ap_sta_id == sta_id) ||
 	    iwl_mvm_is_dqa_supported(mvm)){
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 		if (ret)
@@ -1494,8 +1526,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
 	/* If DQA is supported - the queues can be disabled now */
-	if (iwl_mvm_is_dqa_supported(mvm))
+	if (iwl_mvm_is_dqa_supported(mvm)) {
 		iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+		/*
+		 * If pending_frames is set at this point - it must be
+		 * driver internal logic error, since queues are empty
+		 * and removed successuly.
+		 * warn on it but set it to 0 anyway to avoid station
+		 * not being removed later in the function
+		 */
+		WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
+	}
 
 	/* If there is a TXQ still marked as reserved - free it */
 	if (iwl_mvm_is_dqa_supported(mvm) &&
@@ -1513,7 +1554,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
 			 (*status != IWL_MVM_QUEUE_FREE),
 			 "sta_id %d reserved txq %d status %d",
-			 mvm_sta->sta_id, reserved_txq, *status)) {
+			 sta_id, reserved_txq, *status)) {
 			spin_unlock_bh(&mvm->queue_info_lock);
 			return -EINVAL;
 		}
@@ -1523,7 +1564,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	}
 
 	if (vif->type == NL80211_IFTYPE_STATION &&
-	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
+	    mvmvif->ap_sta_id == sta_id) {
 		/* if associated - we can't remove the AP STA now */
 		if (vif->bss_conf.assoc)
 			return ret;
@@ -1532,7 +1573,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 
 		/* clear d0i3_ap_sta_id if no longer relevant */
-		if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+		if (mvm->d0i3_ap_sta_id == sta_id)
 			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
 	}
 
@@ -1541,7 +1582,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	 * This shouldn't happen - the TDLS channel switch should be canceled
 	 * before the STA is removed.
 	 */
-	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
+	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
 		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
 		cancel_delayed_work(&mvm->tdls_cs.dwork);
 	}
@@ -1551,21 +1592,20 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	 * calls the drain worker.
 	 */
 	spin_lock_bh(&mvm_sta->lock);
+
 	/*
 	 * There are frames pending on the AC queues for this station.
 	 * We need to wait until all the frames are drained...
 	 */
-	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
-		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+	if (atomic_read(&mvm->pending_frames[sta_id])) {
+		rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
 				   ERR_PTR(-EBUSY));
 		spin_unlock_bh(&mvm_sta->lock);
 
 		/* disable TDLS sta queues on drain complete */
 		if (sta->tdls) {
-			mvm->tfd_drained[mvm_sta->sta_id] =
-				mvm_sta->tfd_queue_msk;
-			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
-				       mvm_sta->sta_id);
+			mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
+			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
 		}
 
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@@ -1749,6 +1789,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
 	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 	const u8 *baddr = _baddr;
+	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1764,19 +1805,16 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
 		int queue;
 
-		if ((vif->type == NL80211_IFTYPE_AP) &&
-		    (mvmvif->bcast_sta.tfd_queue_msk &
-		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
+		if (vif->type == NL80211_IFTYPE_AP)
 			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
-		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
-			 (mvmvif->bcast_sta.tfd_queue_msk &
-			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
+		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
 			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
-		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
+		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
 			return -EINVAL;
 
 		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
 				   wdg_timeout);
+		bsta->tfd_queue_msk |= BIT(queue);
 	}
 
 	if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -1785,8 +1823,67 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
 		return -ENOSPC;
 
-	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
-					  mvmvif->id, mvmvif->color);
+	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+					 mvmvif->id, mvmvif->color);
+	if (ret)
+		return ret;
+
+	/*
+	 * In AP vif type, we also need to enable the cab_queue. However, we
+	 * have to enable it after the ADD_STA command is sent, otherwise the
+	 * FW will throw an assert once we send the ADD_STA command (it'll
+	 * detect a mismatch in the tfd_queue_msk, as we can't add the
+	 * enabled-cab_queue to the mask)
+	 */
+	if (iwl_mvm_is_dqa_supported(mvm) &&
+	    vif->type == NL80211_IFTYPE_AP) {
+		struct iwl_trans_txq_scd_cfg cfg = {
+			.fifo = IWL_MVM_TX_FIFO_MCAST,
+			.sta_id = mvmvif->bcast_sta.sta_id,
+			.tid = IWL_MAX_TID_COUNT,
+			.aggregate = false,
+			.frame_limit = IWL_FRAME_LIMIT,
+		};
+		unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
+				   0, &cfg, wdg_timeout);
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+				    IWL_MAX_TID_COUNT, 0);
+
+	if (mvmvif->bcast_sta.tfd_queue_msk &
+	    BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
+		iwl_mvm_disable_txq(mvm,
+				    IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+				    vif->hw_queue[0], IWL_MAX_TID_COUNT,
+				    0);
+		mvmvif->bcast_sta.tfd_queue_msk &=
+			~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+	}
+
+	if (mvmvif->bcast_sta.tfd_queue_msk &
+	    BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
+		iwl_mvm_disable_txq(mvm,
+				    IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+				    vif->hw_queue[0], IWL_MAX_TID_COUNT,
+				    0);
+		mvmvif->bcast_sta.tfd_queue_msk &=
+			~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+	}
+}
 
 /* Send the FW a request to remove the station from it's internal data
@@ -1798,6 +1895,9 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (iwl_mvm_is_dqa_supported(mvm))
+		iwl_mvm_free_bcast_sta_queues(mvm, vif);
+
 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
 	if (ret)
 		IWL_WARN(mvm, "Failed sending remove station\n");
@@ -1811,22 +1911,16 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (!iwl_mvm_is_dqa_supported(mvm))
+	if (!iwl_mvm_is_dqa_supported(mvm)) {
 		qmask = iwl_mvm_mac_get_queues_mask(vif);
 
-	if (vif->type == NL80211_IFTYPE_AP) {
 		/*
 		 * The firmware defines the TFD queue mask to only be relevant
 		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
-		 * be included.
+		 * be included. This only happens in NL80211_IFTYPE_AP vif type,
+		 * so the next line will only have an effect there.
 		 */
 		qmask &= ~BIT(vif->cab_queue);
-
-		if (iwl_mvm_is_dqa_supported(mvm))
-			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
-	} else if (iwl_mvm_is_dqa_supported(mvm) &&
-		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
 	}
 
 	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
@@ -2231,6 +2325,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
 			goto release_locks;
 		}
+		/*
+		 * TXQ shouldn't be in inactive mode for non-DQA, so getting
+		 * an inactive queue from iwl_mvm_find_free_queue() is
+		 * certainly a bug
+		 */
+		WARN_ON(mvm->queue_info[txq_id].status ==
+			IWL_MVM_QUEUE_INACTIVE);
 
 		/* TXQ hasn't yet been enabled, so mark it only as reserved */
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
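The WARN_ON(atomic_xchg(...)) added in iwl_mvm_rm_sta() both clears the leaked pending-frames counter and reports it in a single atomic step, so no other path can observe a half-cleared value. The same idiom in portable C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_frames;

/* Clear the counter and report whether anything was still pending in
 * one atomic operation -- mirrors WARN_ON(atomic_xchg(&counter, 0)). */
static void drain_check(void)
{
	int leftover = atomic_exchange(&pending_frames, 0);

	if (leftover)
		fprintf(stderr, "warning: %d frames still pending\n",
			leftover);
}

int main(void)
{
	atomic_store(&pending_frames, 3);	/* simulate a logic error */
	drain_check();				/* warns and zeroes */
	drain_check();				/* silent */
	return 0;
}
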
@@ -437,6 +437,7 @@ struct iwl_mvm_sta {
 	bool disable_tx;
 	bool tlc_amsdu;
 	bool sleeping;
+	bool associated;
 	u8 agg_tids;
 	u8 sleep_tx_count;
 	u8 avg_energy;

@@ -202,7 +202,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 			struct iwl_tx_cmd *tx_cmd,
 			struct ieee80211_tx_info *info, u8 sta_id)
 {
-	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	__le16 fc = hdr->frame_control;
 	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
@@ -284,9 +283,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 
 	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
-	/* Total # bytes to be transmitted */
-	tx_cmd->len = cpu_to_le16((u16)skb->len +
-		      (uintptr_t)skb_info->driver_data[0]);
+	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
+	tx_cmd->len = cpu_to_le16((u16)skb->len);
 	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 	tx_cmd->sta_id = sta_id;
 
@@ -466,7 +464,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 		      struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct iwl_device_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 
@@ -486,12 +483,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 
+	return dev_cmd;
+}
+
+static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
+				       struct iwl_device_cmd *cmd)
+{
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
 	memset(&skb_info->status, 0, sizeof(skb_info->status));
 	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 
-	skb_info->driver_data[1] = dev_cmd;
-
-	return dev_cmd;
+	skb_info->driver_data[1] = cmd;
 }
 
 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
@@ -543,9 +546,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
 	 * and hence needs to be sent on the aux queue
 	 */
-	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+	if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
 	    skb_info->control.vif->type == NL80211_IFTYPE_STATION)
-		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+		skb_info->hw_queue = mvm->aux_queue;
 
 	memcpy(&info, skb->cb, sizeof(info));
 
@@ -557,9 +560,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		    info.hw_queue != info.control.vif->cab_queue)))
 		return -1;
 
-	/* This holds the amsdu headers length */
-	skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
 	queue = info.hw_queue;
 
 	/*
@@ -570,9 +570,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	 * (this is not possible for unicast packets as a TLDS discovery
 	 * response are sent without a station entry); otherwise use the
 	 * AUX station.
-	 * In DQA mode, if vif is of type STATION and frames are not multicast,
-	 * they should be sent from the BSS queue. For example, TDLS setup
-	 * frames should be sent on this queue, as they go through the AP.
+	 * In DQA mode, if vif is of type STATION and frames are not multicast
+	 * or offchannel, they should be sent from the BSS queue.
+	 * For example, TDLS setup frames should be sent on this queue,
+	 * as they go through the AP.
 	 */
 	sta_id = mvm->aux_sta.sta_id;
 	if (info.control.vif) {
@@ -594,7 +595,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			if (ap_sta_id != IWL_MVM_STATION_COUNT)
 				sta_id = ap_sta_id;
 		} else if (iwl_mvm_is_dqa_supported(mvm) &&
-			   info.control.vif->type == NL80211_IFTYPE_STATION) {
+			   info.control.vif->type == NL80211_IFTYPE_STATION &&
+			   queue != mvm->aux_queue) {
 			queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
 		}
 	}
@@ -605,6 +607,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	if (!dev_cmd)
 		return -1;
 
+	/* From now on, we cannot access info->control */
+	iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	/* Copy MAC header from skb into command buffer */
@@ -641,7 +646,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
 	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
 	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
-	u16 amsdu_add, snap_ip_tcp, pad, i = 0;
+	u16 snap_ip_tcp, pad, i = 0;
 	unsigned int dbg_max_amsdu_len;
 	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
 	u8 *qc, tid, txf;
@@ -743,21 +748,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	/* This skb fits in one single A-MSDU */
 	if (num_subframes * mss >= tcp_payload_len) {
-		struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
-
-		/*
-		 * Compute the length of all the data added for the A-MSDU.
-		 * This will be used to compute the length to write in the TX
-		 * command. We have: SNAP + IP + TCP for n -1 subframes and
-		 * ETH header for n subframes. Note that the original skb
-		 * already had one set of SNAP / IP / TCP headers.
-		 */
-		num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-		amsdu_add = num_subframes * sizeof(struct ethhdr) +
-			(num_subframes - 1) * (snap_ip_tcp + pad);
-		/* This holds the amsdu headers length */
-		skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
-
 		__skb_queue_tail(mpdus_skb, skb);
 		return 0;
 	}
@@ -796,14 +786,6 @@ segment:
 			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 
 		if (tcp_payload_len > mss) {
-			struct ieee80211_tx_info *skb_info =
-				IEEE80211_SKB_CB(tmp);
-
-			num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-			amsdu_add = num_subframes * sizeof(struct ethhdr) +
-				(num_subframes - 1) * (snap_ip_tcp + pad);
-			skb_info->driver_data[0] =
-				(void *)(uintptr_t)amsdu_add;
 			skb_shinfo(tmp)->gso_size = mss;
 		} else {
 			qc = ieee80211_get_qos_ctl((void *)tmp->data);
@@ -915,7 +897,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		goto drop;
 
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-	/* From now on, we cannot access info->control */
 
 	/*
 	 * we handle that entirely ourselves -- for uAPSD the firmware
@@ -926,6 +907,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	spin_lock(&mvmsta->lock);
 
+	/* nullfunc frames should go to the MGMT queue regardless of QOS,
+	 * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
+	 * assignment of MGMT TID
+	 */
 	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
 		u8 *qc = NULL;
 		qc = ieee80211_get_qos_ctl(hdr);
@@ -938,27 +923,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
 		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
-	} else if (iwl_mvm_is_dqa_supported(mvm) &&
-		   (ieee80211_is_qos_nullfunc(fc) ||
-		    ieee80211_is_nullfunc(fc))) {
-		/*
-		 * nullfunc frames should go to the MGMT queue regardless of QOS
-		 */
-		tid = IWL_MAX_TID_COUNT;
+		if (WARN_ON_ONCE(is_ampdu &&
+				 mvmsta->tid_data[tid].state != IWL_AGG_ON))
+			goto drop_unlock_sta;
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm)) {
+	if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
 		txq_id = mvmsta->tid_data[tid].txq_id;
 
-		if (ieee80211_is_mgmt(fc))
-			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
-	}
-
-	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, hdrlen);
-
-	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
-
 	if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
 		/* default to TID 0 for non-QoS packets */
 		u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -966,11 +937,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
 	}
 
-	if (is_ampdu) {
-		if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
-			goto drop_unlock_sta;
-		txq_id = mvmsta->tid_data[tid].txq_id;
-	}
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
 	/* Check if TXQ needs to be allocated or re-activated */
 	if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
@@ -1022,6 +992,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
+	/* From now on, we cannot access info->control */
+	iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;
 
@@ -1031,7 +1004,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	spin_unlock(&mvmsta->lock);
 
 	/* Increase pending frames count if this isn't AMPDU */
-	if (!is_ampdu)
+	if ((iwl_mvm_is_dqa_supported(mvm) &&
+	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
+	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
+	    (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
 		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
 	return 0;
@@ -1047,7 +1023,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 		   struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_tx_info info;
 	struct sk_buff_head mpdus_skbs;
 	unsigned int payload_len;
@@ -1061,9 +1036,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	memcpy(&info, skb->cb, sizeof(info));
 
-	/* This holds the amsdu headers length */
-	skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
 	if (!skb_is_gso(skb))
 		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
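The driver_data[0] bookkeeping deleted from iwl_mvm_tx_tso() encoded a simple formula: an A-MSDU of n subframes adds n Ethernet headers plus n-1 extra SNAP/IP/TCP header sets (the original skb already carries one set); the PCIe code now accounts for this per subframe instead. A worked sketch of the removed computation, with representative header sizes:

#include <stdio.h>

/* Representative sizes; pad is the per-subframe alignment padding. */
#define ETH_HLEN	14
#define SNAP_IP_TCP	(8 + 20 + 20)

static unsigned int amsdu_added_len(unsigned int num_subframes,
				    unsigned int pad)
{
	/* n subframe (Ethernet) headers, plus SNAP+IP+TCP for the n-1
	 * subframes that can't reuse the original skb's copy. */
	return num_subframes * ETH_HLEN +
	       (num_subframes - 1) * (SNAP_IP_TCP + pad);
}

int main(void)
{
	/* e.g. 3 MSDUs per A-MSDU, 2 bytes of pad per subframe */
	printf("added bytes: %u\n", amsdu_added_len(3, 2));
	return 0;
}
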
@@ -497,13 +497,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
 	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }
 
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 {
 	struct iwl_trans *trans = mvm->trans;
 	struct iwl_error_event_table table;
-	u32 base;
 
-	base = mvm->error_event_table;
 	if (mvm->cur_ucode == IWL_UCODE_INIT) {
 		if (!base)
 			base = mvm->fw->init_errlog_ptr;
@@ -574,6 +572,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
 	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
 	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+	iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
+
+	if (mvm->error_event_table[1])
+		iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
 
 	if (mvm->support_umac_log)
 		iwl_mvm_dump_umac_error_log(mvm);
@@ -649,8 +655,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	/* Make sure this TID isn't already enabled */
 	if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
 		spin_unlock_bh(&mvm->queue_info_lock);
-		IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
-			cfg->tid);
+		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+			queue, cfg->tid);
 		return;
 	}
 
@@ -533,7 +533,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
 
 /* a000 Series */
-	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
 #endif /* CONFIG_IWLMVM */
 
 	{0}
@@ -673,11 +673,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			cfg = &iwl9000lc_2ac_cfg;
 			iwl_trans->cfg = cfg;
 		}
+
+		if (cfg == &iwla000_2ac_cfg_hr &&
+		    iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
+			cfg = &iwla000_2ac_cfg_jf;
+			iwl_trans->cfg = cfg;
+		}
 	}
 #endif
 
 	pci_set_drvdata(pdev, iwl_trans);
-	iwl_trans->drv = iwl_drv_start(iwl_trans, cfg);
+	iwl_trans->drv = iwl_drv_start(iwl_trans);
 
 	if (IS_ERR(iwl_trans->drv)) {
 		ret = PTR_ERR(iwl_trans->drv);
@@ -778,13 +784,14 @@ static int iwl_pci_resume(struct device *device)
 
 	/*
 	 * Enable rfkill interrupt (in order to keep track of
-	 * the rfkill status)
+	 * the rfkill status). Must be locked to avoid processing
+	 * a possible rfkill interrupt between reading the state
+	 * and calling iwl_trans_pcie_rf_kill() with it.
 	 */
+	mutex_lock(&trans_pcie->mutex);
 	iwl_enable_rfkill_int(trans);
 
 	hw_rfkill = iwl_is_rfkill_set(trans);
-
-	mutex_lock(&trans_pcie->mutex);
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 	mutex_unlock(&trans_pcie->mutex);
 
@@ -670,6 +670,8 @@ static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
 
 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
 {
+	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex);
+
 	return !(iwl_read32(trans, CSR_GP_CNTRL) &
 		   CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
 }

@@ -1607,13 +1607,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 	if (inta & CSR_INT_BIT_RF_KILL) {
 		bool hw_rfkill;
 
+		mutex_lock(&trans_pcie->mutex);
 		hw_rfkill = iwl_is_rfkill_set(trans);
 		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
 			 hw_rfkill ? "disable radio" : "enable radio");
 
 		isr_stats->rfkill++;
 
-		mutex_lock(&trans_pcie->mutex);
 		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 		mutex_unlock(&trans_pcie->mutex);
 		if (hw_rfkill) {
@@ -1952,13 +1952,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
 		bool hw_rfkill;
 
+		mutex_lock(&trans_pcie->mutex);
 		hw_rfkill = iwl_is_rfkill_set(trans);
 		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
 			 hw_rfkill ? "disable radio" : "enable radio");
 
 		isr_stats->rfkill++;
 
-		mutex_lock(&trans_pcie->mutex);
 		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 		mutex_unlock(&trans_pcie->mutex);
 		if (hw_rfkill) {
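Both interrupt handlers above now take trans_pcie->mutex before reading the rfkill state rather than just before consuming it, closing the window where the bit could toggle between the read and iwl_trans_pcie_rf_kill() — which is also why iwl_is_rfkill_set() grew a lockdep assertion. A pthread sketch of the read-and-consume-under-one-lock pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static int hw_rfkill_state;	/* stands in for the CSR register bit */

static int is_rfkill_set_locked(void)
{
	/* Caller must hold state_mutex, like iwl_is_rfkill_set() after
	 * this series (lockdep_assert_held in the real code). */
	return hw_rfkill_state;
}

static void handle_rfkill_irq(void)
{
	pthread_mutex_lock(&state_mutex);

	/* The read and its consumption happen in one critical section,
	 * so a concurrent toggle can't slip in between them. */
	int hw_rfkill = is_rfkill_set_locked();

	printf("RF_KILL toggled to %s\n",
	       hw_rfkill ? "disable radio" : "enable radio");
	pthread_mutex_unlock(&state_mutex);
}

int main(void)
{
	hw_rfkill_state = 1;
	handle_rfkill_irq();
	return 0;
}
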
@@ -2953,16 +2953,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 					       PCIE_LINK_STATE_CLKPM);
 	}
 
-	if (cfg->mq_rx_supported)
-		addr_size = 64;
-	else
-		addr_size = 36;
-
 	if (cfg->use_tfh) {
+		addr_size = 64;
 		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
 		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
-
 	} else {
+		addr_size = 36;
 		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
 		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
 	}
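After this change the DMA address width follows the descriptor format (64-bit for the new TFH layout, 36-bit for legacy TFDs) instead of the RX-queue capability. A small sketch of how such a width would translate into a DMA mask; DMA_BIT_MASK here re-implements the kernel macro purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Equivalent of the kernel's DMA_BIT_MASK(n) for n < 64. */
#define DMA_BIT_MASK(n)	(((uint64_t)1 << (n)) - 1)

int main(void)
{
	int use_tfh = 1;	/* new TFH descriptor format? */
	int addr_size = use_tfh ? 64 : 36;

	uint64_t mask = (addr_size == 64) ? ~(uint64_t)0
					  : DMA_BIT_MASK(addr_size);

	printf("addr_size=%d mask=0x%016llx\n",
	       addr_size, (unsigned long long)mask);
	return 0;
}
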
@@ -2096,6 +2096,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_cmd_meta *out_meta,
 				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
 {
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -2145,6 +2146,13 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	 */
 	skb_pull(skb, hdr_len + iv_len);
 
+	/*
+	 * Remove the length of all the headers that we don't actually
+	 * have in the MPDU by themselves, but that we duplicate into
+	 * all the different MSDUs inside the A-MSDU.
+	 */
+	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
 	tso_start(skb, &tso);
 
 	while (total_len) {
@@ -2155,7 +2163,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		unsigned int hdr_tb_len;
 		dma_addr_t hdr_tb_phys;
 		struct tcphdr *tcph;
-		u8 *iph;
+		u8 *iph, *subf_hdrs_start = hdr_page->pos;
 
 		total_len -= data_left;
 
@@ -2216,6 +2224,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				       hdr_tb_len, false);
 		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
 					       hdr_tb_len);
+		/* add this subframe's headers' length to the tx_cmd */
+		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
 
 		/* prepare the start_hdr for the next subframe */
 		start_hdr = hdr_page->pos;
@@ -2408,9 +2418,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		tb1_len = len;
 	}
 
-	/* The first TB points to bi-directional DMA data */
-	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
-	       IWL_FIRST_TB_SIZE);
+	/*
+	 * The first TB points to bi-directional DMA data, we'll
+	 * memcpy the data into it later.
+	 */
 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
 			       IWL_FIRST_TB_SIZE, true);
 
@@ -2434,6 +2445,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		goto out_err;
 	}
 
+	/* building the A-MSDU might have changed this data, so memcpy it now */
+	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+	       IWL_FIRST_TB_SIZE);
+
 	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),