commit 739596b09b

Merge tag 'iwlwifi-next-for-kalle-2016-03-02' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* add support for thermal device / cooling device (Chaya Rachel)
* fixes for 9000 devices data path (Sara Sharon)
* improvements in scheduled scan w/o profiles (Luca)
* new firmware support (-21.ucode)
* add MSIX support for 9000 devices (Haim Dreyfuss)
* cleanup in PCIe initialization
* enable MU-MIMO and take care of firmware restart (Sara Sharon)
  ===> This needs mac80211-next
* add support for large SKBs in mvm to reach A-MSDU
  ===> This needs mac80211-next
* add support for filtering frames from a BA session (Sara Sharon)
  ===> This needs mac80211-next
* start implementing the new Rx path for 9000 devices (Sara Sharon)
* enable the new RRM feature flag (Beni Lev)
* fix U-APSD enablement on P2P Client (Avri Altman)
* fix beacon abort enablement (Avri Altman)
* forbid beacon storing with WoWLAN (Matti Gottlieb)
* support unified uSniffer / regular firmware image (Golan Ben-Ami)
* fix a race between debugfs hooks and iface up (Chaya Rachel Ivgi)
* fixes for runtime PM (Luca)
* add a new module parameter to disable VHT (Andrei Otcheretianski)
* build infrastructure for Dynamic Queue Allocation (Liad Kaufman)
@@ -73,8 +73,8 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX	17
 #define IWL7265_UCODE_API_MAX	17
-#define IWL7265D_UCODE_API_MAX	20
-#define IWL3168_UCODE_API_MAX	20
+#define IWL7265D_UCODE_API_MAX	21
+#define IWL3168_UCODE_API_MAX	21

 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	13
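[Editor's note: the *_UCODE_API_MAX bumps above (and in the next two files) are what make the driver request the new -21.ucode first. The sketch below is conceptual only — the real iwl_request_firmware() walks the same MAX..MIN range but asynchronously via request_firmware_nowait() callbacks, and firmware_available() is a hypothetical helper.]

	static int example_pick_ucode_api(const struct iwl_cfg *cfg,
					  char *name, size_t len)
	{
		int api;

		/* try the newest supported API first, then fall back */
		for (api = cfg->ucode_api_max; api >= cfg->ucode_api_min; api--) {
			snprintf(name, len, "%s%d.ucode", cfg->fw_name_pre, api);
			if (firmware_available(name))	/* hypothetical */
				return api;
		}
		return -ENOENT;
	}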
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"

 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	20
-#define IWL8265_UCODE_API_MAX	20
+#define IWL8000_UCODE_API_MAX	21
+#define IWL8265_UCODE_API_MAX	21

 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK	13

@@ -217,6 +217,7 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.vht_mu_mimo_supported = true,
 };

 const struct iwl_cfg iwl4165_2ac_cfg = {
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"

 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	20
+#define IWL9000_UCODE_API_MAX	21

 /* Oldest version we won't warn about */
 #define IWL9000_UCODE_API_OK	13

@@ -139,7 +139,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 	.smem_len = IWL9000_SMEM_LEN,				\
 	.thermal_params = &iwl9000_tt_params,			\
 	.apmg_not_supported = true,				\
-	.mq_rx_supported = true
+	.mq_rx_supported = true,				\
+	.vht_mu_mimo_supported = true

 const struct iwl_cfg iwl9260_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9260",
@@ -312,6 +312,7 @@ struct iwl_pwr_tx_backoff {
 * @smem_offset: offset from which the SMEM begins
 * @smem_len: the length of SMEM
 * @mq_rx_supported: multi-queue rx support
+ * @vht_mu_mimo_supported: VHT MU-MIMO support
 *
 * We enable the driver to be backward compatible wrt. hardware features.
 * API differences in uCode shouldn't be handled here but through TLVs

@@ -364,6 +365,7 @@ struct iwl_cfg {
 	const struct iwl_tt_params *thermal_params;
 	bool apmg_not_supported;
 	bool mq_rx_supported;
+	bool vht_mu_mimo_supported;
 };

 /*
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -549,4 +550,52 @@ enum dtd_diode_reg {
 	DTS_DIODE_REG_FLAGS_PASS_ONCE	= 0x00000080, /* bits [7:7] */
 };

+/*****************************************************************************
+ *                        MSIX related registers                            *
+ *****************************************************************************/
+
+#define CSR_MSIX_BASE			(0x2000)
+#define CSR_MSIX_FH_INT_CAUSES_AD	(CSR_MSIX_BASE + 0x800)
+#define CSR_MSIX_FH_INT_MASK_AD		(CSR_MSIX_BASE + 0x804)
+#define CSR_MSIX_HW_INT_CAUSES_AD	(CSR_MSIX_BASE + 0x808)
+#define CSR_MSIX_HW_INT_MASK_AD		(CSR_MSIX_BASE + 0x80C)
+#define CSR_MSIX_AUTOMASK_ST_AD		(CSR_MSIX_BASE + 0x810)
+#define CSR_MSIX_RX_IVAR_AD_REG		(CSR_MSIX_BASE + 0x880)
+#define CSR_MSIX_IVAR_AD_REG		(CSR_MSIX_BASE + 0x890)
+#define CSR_MSIX_PENDING_PBA_AD		(CSR_MSIX_BASE + 0x1000)
+#define CSR_MSIX_RX_IVAR(cause)		(CSR_MSIX_RX_IVAR_AD_REG + (cause))
+#define CSR_MSIX_IVAR(cause)		(CSR_MSIX_IVAR_AD_REG + (cause))
+
+#define MSIX_FH_INT_CAUSES_Q(q)		(q)
+
+/*
+ * Causes for the FH register interrupts
+ */
+enum msix_fh_int_causes {
+	MSIX_FH_INT_CAUSES_D2S_CH0_NUM	= BIT(16),
+	MSIX_FH_INT_CAUSES_D2S_CH1_NUM	= BIT(17),
+	MSIX_FH_INT_CAUSES_S2D		= BIT(19),
+	MSIX_FH_INT_CAUSES_FH_ERR	= BIT(21),
+};
+
+/*
+ * Causes for the HW register interrupts
+ */
+enum msix_hw_int_causes {
+	MSIX_HW_INT_CAUSES_REG_ALIVE	= BIT(0),
+	MSIX_HW_INT_CAUSES_REG_WAKEUP	= BIT(1),
+	MSIX_HW_INT_CAUSES_REG_CT_KILL	= BIT(6),
+	MSIX_HW_INT_CAUSES_REG_RF_KILL	= BIT(7),
+	MSIX_HW_INT_CAUSES_REG_PERIODIC	= BIT(8),
+	MSIX_HW_INT_CAUSES_REG_SW_ERR	= BIT(25),
+	MSIX_HW_INT_CAUSES_REG_SCD	= BIT(26),
+	MSIX_HW_INT_CAUSES_REG_FH_TX	= BIT(27),
+	MSIX_HW_INT_CAUSES_REG_HW_ERR	= BIT(29),
+	MSIX_HW_INT_CAUSES_REG_HAP	= BIT(30),
+};
+
+#define MSIX_MIN_INTERRUPT_VECTORS	2
+#define MSIX_AUTO_CLEAR_CAUSE		0
+#define MSIX_NON_AUTO_CLEAR_CAUSE	BIT(7)
+
 #endif /* !__iwl_csr_h__ */
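[Editor's note: a sketch of how the new cause registers are meant to be used, assuming write-1-to-clear semantics for the *_CAUSES_AD registers and the driver's usual iwl_read32()/iwl_write32() accessors — illustrative only, not the driver's actual MSI-X interrupt handler.]

	static irqreturn_t example_msix_isr(struct iwl_trans *trans)
	{
		u32 fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
		u32 hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);

		/* ack the causes: assumed write-1-to-clear */
		iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, fh);
		iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, hw);

		if (fh & MSIX_FH_INT_CAUSES_FH_ERR)
			dev_err(trans->dev, "FH error interrupt\n");
		if (hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
			dev_info(trans->dev, "RF-kill interrupt\n");

		return IRQ_HANDLED;
	}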
@@ -73,12 +73,12 @@ TRACE_EVENT(iwlwifi_dev_rx,
 	TP_ARGS(dev, trans, pkt, len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
-		__field(u8, cmd)
+		__field(u16, cmd)
 		__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
-		__entry->cmd = pkt->hdr.cmd;
+		__entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
 		memcpy(__get_dynamic_array(rxbuf), pkt,
 		       iwl_rx_trace_len(trans, pkt, len));
 	),
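[Editor's note: the trace field widens from u8 to u16 because commands are now addressed by a (group, opcode) pair. For reference, the wide ID is composed as below — this matches the WIDE_ID definition in iwl-trans.h at the time, quoted from memory, so treat it as illustrative.]

	/* wide command ID: group in the high byte, opcode in the low byte */
	#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)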
@@ -1033,7 +1033,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		}
 	}

-	if (usniffer_req && !*usniffer_images) {
+	if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) &&
+	    usniffer_req && !*usniffer_images) {
 		IWL_ERR(drv,
 			"user selected to work with usniffer but usniffer image isn't available in ucode package\n");
 		return -EINVAL;

@@ -1718,3 +1719,7 @@ MODULE_PARM_DESC(fw_monitor,
 module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay,
		   uint, S_IRUGO);
 MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
+
+module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
+		   S_IRUGO);
+MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
@@ -6,7 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
-* Copyright(c) 2015 Intel Deutschland GmbH
+* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -32,7 +32,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
-* Copyright(c) 2015 Intel Deutschland GmbH
+* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -368,20 +368,24 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RFH_RXF_DMA_RBDCB_SIZE_512	(0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
 #define RFH_RXF_DMA_RBDCB_SIZE_1024	(0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
 #define RFH_RXF_DMA_RBDCB_SIZE_2048	(0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
-#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */
+#define RFH_RXF_DMA_MIN_RB_SIZE_MASK	(0x03000000) /* bit 24-25 */
 #define RFH_RXF_DMA_MIN_RB_SIZE_POS	24
-#define RFH_RXF_DMA_MIN_RB_4_8	(3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
-#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
-#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/
-#define RFH_DMA_EN_ENABLE_VAL BIT(31)
+#define RFH_RXF_DMA_MIN_RB_4_8		(3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK	(0x04000000) /* bit 26 */
+#define RFH_RXF_DMA_SINGLE_FRAME_MASK	(0x20000000) /* bit 29 */
+#define RFH_DMA_EN_MASK			(0xC0000000) /* bits 30-31*/
+#define RFH_DMA_EN_ENABLE_VAL		BIT(31)

 #define RFH_RXF_RXQ_ACTIVE 0xA0980C

 #define RFH_GEN_CFG	0xA09800
-#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0)
-#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1)
-#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
+#define RFH_GEN_CFG_RB_CHUNK_SIZE	BIT(4) /* 0 - 64B, 1- 128B */
+#define RFH_GEN_CFG_SERVICE_DMA_SNOOP	BIT(0)
+#define RFH_GEN_CFG_RFH_DMA_SNOOP	BIT(1)
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8

-#define DEFAULT_RXQ_NUM 8
+#define DEFAULT_RXQ_NUM			0

 /* end of 9000 rx series registers */
@@ -318,6 +318,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
 * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
 *	from AP and will send it upon d0i3 exit.
 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ *	thresholds reporting
+ * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ *	regular image.
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */

@@ -351,6 +357,10 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION		= (__force iwl_ucode_tlv_capa_t)71,
 	IWL_UCODE_TLV_CAPA_BEACON_STORING		= (__force iwl_ucode_tlv_capa_t)72,
 	IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2		= (__force iwl_ucode_tlv_capa_t)73,
+	IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW		= (__force iwl_ucode_tlv_capa_t)74,
+	IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT	= (__force iwl_ucode_tlv_capa_t)75,
+	IWL_UCODE_TLV_CAPA_CTDP_SUPPORT			= (__force iwl_ucode_tlv_capa_t)76,
+	IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED		= (__force iwl_ucode_tlv_capa_t)77,

 	NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
@@ -115,6 +115,7 @@ enum iwl_amsdu_size {
 *	entering D0i3 (in msecs)
 * @lar_disable: disable LAR (regulatory), default = 0
 * @fw_monitor: allow to use firmware monitor
+ * @disable_11ac: disable VHT capabilities, default = false.
 */
 struct iwl_mod_params {
 	int sw_crypto;

@@ -135,6 +136,7 @@ struct iwl_mod_params {
 	unsigned int d0i3_entry_delay;
 	bool lar_disable;
 	bool fw_monitor;
+	bool disable_11ac;
 };

 #endif /* #__iwl_modparams_h__ */
@@ -366,6 +366,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
		       max_ampdu_exponent <<
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

+	if (cfg->vht_mu_mimo_supported)
+		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
 	if (cfg->ht_params->ldpc)
		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;

@@ -449,7 +452,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
			     IEEE80211_BAND_5GHZ);
 	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
			     tx_chains, rx_chains);
-	if (data->sku_cap_11ac_enable)
+	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
 		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
				      tx_chains, rx_chains);
@@ -404,4 +404,6 @@ enum {
 	LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
 };

+#define UREG_CHICK		(0xA05C00)
+#define UREG_CHICK_MSIX_ENABLE	BIT(25)
 #endif /* __iwl_prph_h__ */
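[Editor's note: UREG_CHICK_MSIX_ENABLE is the device-side switch matching the new MSI-X support. A minimal sketch of how PCIe init might assert it, assuming the existing iwl_write_prph() accessor — not the driver's actual init path:]

	static void example_enable_msix_mode(struct iwl_trans *trans)
	{
		/* tell the device that MSI-X vectors are in use */
		iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
	}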
@@ -836,6 +836,7 @@ struct iwl_trans {

 	enum iwl_plat_pm_mode system_pm_mode;
 	enum iwl_plat_pm_mode runtime_pm_mode;
+	bool suspending;

 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -816,8 +816,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
 {
 	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

-	iwl_trans_stop_device(mvm->trans);
-
+	iwl_mvm_stop_device(mvm);
 	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though

@@ -856,8 +855,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
 	wowlan_config_cmd->is_11n_connection =
					ap_sta->ht_cap.ht_supported;
 	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
-		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING |
-		ENABLE_STORE_BEACON;
+		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

 	/* Query the last used seqno and set it */
 	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -64,6 +64,7 @@
 *
 *****************************************************************************/
 #include <linux/vmalloc.h>
+#include <linux/ieee80211.h>

 #include "mvm.h"
 #include "fw-dbg.h"

@@ -976,7 +977,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
 	memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
	       ARRAY_SIZE(cmd.indirection_table) % nbytes);

-	memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));

 	mutex_lock(&mvm->mutex);
 	ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);

@@ -1080,6 +1081,22 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
 	return count;
 }

+static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
+					     char *buf, size_t count,
+					     loff_t *ppos)
+{
+	unsigned int max_amsdu_len;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &max_amsdu_len);
+
+	if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
+		return -EINVAL;
+	mvm->max_amsdu_len = max_amsdu_len;
+
+	return count;
+}
+
 #define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,

@@ -1497,7 +1514,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
 MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
-MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
+			   (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));

 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);

@@ -1540,6 +1559,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 	MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
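[Editor's note: the ARRAY_SIZE() -> sizeof() changes here and in fw.c/ops.c matter because the two differ for any array whose elements are wider than one byte; with a u32-based key, ARRAY_SIZE() yields the element count, so only a quarter of the key was being copied or randomized. A self-contained illustration — "key" is a stand-in, not the driver's field:]

	static void example_array_size_vs_sizeof(void)
	{
		u32 key[10];	/* stand-in for the RSS secret key */

		BUILD_BUG_ON(ARRAY_SIZE(key) != 10);	/* element count */
		BUILD_BUG_ON(sizeof(key) != 40);	/* byte count */

		/* memcpy()/get_random_bytes() take bytes, hence sizeof() */
		get_random_bytes(key, sizeof(key));
	}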
@@ -391,4 +391,56 @@ struct iwl_rss_config_cmd {
 	u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
 } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */

+#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
+#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
+#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
+
+/**
+ * struct iwl_rxq_sync_cmd - RXQ notification trigger
+ *
+ * @flags: flags of the notification. bit 0:3 are the sender queue
+ * @rxq_mask: rx queues to send the notification on
+ * @count: number of bytes in payload, should be DWORD aligned
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_cmd {
+	__le32 flags;
+	__le32 rxq_mask;
+	__le32 count;
+	u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rxq_sync_notification - Notification triggered by RXQ
+ * sync command
+ *
+ * @count: number of bytes in payload
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_notification {
+	__le32 count;
+	u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * Internal message identifier
+ *
+ * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mvm_rxq_notif_type {
+	IWL_MVM_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
+ * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+ *
+ * @type: value from &iwl_mvm_rxq_notif_type
+ * @data: payload
+ */
+struct iwl_mvm_internal_rxq_notif {
+	u32 type;
+	u8 data[];
+} __packed;
+
 #endif /* __fw_api_rx_h__ */
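[Editor's note: a sketch of how these structures nest when the driver wants to synchronize all RSS queues. It leans on iwl_mvm_notify_rx_queue(), which is declared later in this series; treat it as illustrative rather than the actual sender.]

	static int example_sync_del_ba(struct iwl_mvm *mvm, u32 rxq_mask)
	{
		struct iwl_mvm_internal_rxq_notif notif = {
			.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		};

		/* the internal notif rides as the payload of iwl_rxq_sync_cmd */
		return iwl_mvm_notify_rx_queue(mvm, rxq_mask,
					       (u8 *)&notif, sizeof(notif));
	}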
@@ -119,6 +119,8 @@ enum {
 	SCAN_ABORT_UMAC = 0xe,
 	SCAN_COMPLETE_UMAC = 0xf,

+	BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
+
 	/* station table */
 	ADD_STA_KEY = 0x17,
 	ADD_STA = 0x18,

@@ -279,9 +281,19 @@ enum {
 */
 enum iwl_phy_ops_subcmd_ids {
 	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+	CTDP_CONFIG_CMD = 0x03,
+	TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+	CT_KILL_NOTIFICATION = 0xFE,
 	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };

+enum iwl_data_path_subcmd_ids {
+	UPDATE_MU_GROUPS_CMD = 0x1,
+	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+	MU_GROUP_MGMT_NOTIF = 0xFE,
+	RX_QUEUES_NOTIFICATION = 0xFF,
+};
+
 enum iwl_prot_offload_subcmd_ids {
 	STORED_BEACON_NTF = 0xFF,
 };

@@ -291,6 +303,7 @@ enum {
 	LEGACY_GROUP = 0x0,
 	LONG_GROUP = 0x1,
 	PHY_OPS_GROUP = 0x4,
+	DATA_PATH_GROUP = 0x5,
 	PROT_OFFLOAD_GROUP = 0xb,
 };
@@ -1278,6 +1291,26 @@ struct iwl_fw_bcast_filter {
 	struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
 } __packed; /* BCAST_FILTER_S_VER_1 */

+#define BA_WINDOW_STREAMS_MAX		16
+#define BA_WINDOW_STATUS_TID_MSK	0x000F
+#define BA_WINDOW_STATUS_STA_ID_POS	4
+#define BA_WINDOW_STATUS_STA_ID_MSK	0x01F0
+#define BA_WINDOW_STATUS_VALID_MSK	BIT(9)
+
+/**
+ * struct iwl_ba_window_status_notif - reordering window's status notification
+ * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63]
+ * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid
+ * @start_seq_num: the start sequence number of the bitmap
+ * @mpdu_rx_count: the number of received MPDUs since entering D0i3
+ */
+struct iwl_ba_window_status_notif {
+	__le64 bitmap[BA_WINDOW_STREAMS_MAX];
+	__le16 ra_tid[BA_WINDOW_STREAMS_MAX];
+	__le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
+	__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
+} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+
 /**
 * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
 * @default_discard: default action for this mac (discard (1) / pass (0)).
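[Editor's note: unpacking one stream's ra_tid word with the masks above; this follows directly from the documented bit layout (bit 3:0 TID, bit 8:4 STA_ID, bit 9 valid). Illustrative helper, not driver code:]

	static void example_decode_ra_tid(struct iwl_ba_window_status_notif *notif,
					  int i)
	{
		u16 ra_tid = le16_to_cpu(notif->ra_tid[i]);
		u8 tid = ra_tid & BA_WINDOW_STATUS_TID_MSK;
		u8 sta_id = (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
			    BA_WINDOW_STATUS_STA_ID_POS;

		if (ra_tid & BA_WINDOW_STATUS_VALID_MSK)
			pr_debug("stream %d: sta %u tid %u\n", i, sta_id, tid);
	}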
@@ -1675,15 +1708,77 @@ struct iwl_ext_dts_measurement_cmd {
 } __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */

 /**
- * iwl_dts_measurement_notif - notification received with the measurements
+ * struct iwl_dts_measurement_notif_v1 - measurements notification
 *
 * @temp: the measured temperature
 * @voltage: the measured voltage
 */
-struct iwl_dts_measurement_notif {
+struct iwl_dts_measurement_notif_v1 {
 	__le32 temp;
 	__le32 voltage;
-} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1 */
+
+/**
+ * struct iwl_dts_measurement_notif_v2 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ * @threshold_idx: the trip index that was crossed
+ */
+struct iwl_dts_measurement_notif_v2 {
+	__le32 temp;
+	__le32 voltage;
+	__le32 threshold_idx;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
+
+/**
+ * struct ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in celsius
+ * @reserved: reserved
+ */
+struct ct_kill_notif {
+	__le16 temperature;
+	__le16 reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+ * enum ctdp_cmd_operation - CTDP command operations
+ * @CTDP_CMD_OPERATION_START: update the current budget
+ * @CTDP_CMD_OPERATION_STOP: stop ctdp
+ * @CTDP_CMD_OPERATION_REPORT: get the average budget
+ */
+enum iwl_mvm_ctdp_cmd_operation {
+	CTDP_CMD_OPERATION_START	= 0x1,
+	CTDP_CMD_OPERATION_STOP		= 0x2,
+	CTDP_CMD_OPERATION_REPORT	= 0x4,
+}; /* CTDP_CMD_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ *
+ * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @budget: the budget in milliwatt
+ * @window_size: defined in API but not used
+ */
+struct iwl_mvm_ctdp_cmd {
+	__le32 operation;
+	__le32 budget;
+	__le32 window_size;
+} __packed;
+
+#define IWL_MAX_DTS_TRIPS	8
+
+/**
+ * struct iwl_temp_report_ths_cmd - set temperature thresholds
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct temp_report_ths_cmd {
+	__le32 num_temps;
+	__le16 thresholds[IWL_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */

 /***********************************
 * TDLS API
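[Editor's note: a minimal sketch of programming one trip point with the new command, assuming Celsius units for the __le16 thresholds and the wide-ID plumbing shown elsewhere in this merge; the driver's real iwl_mvm_send_temp_report_ths_cmd() derives the trips from the registered thermal zone instead.]

	static int example_set_one_trip(struct iwl_mvm *mvm, s16 celsius)
	{
		struct temp_report_ths_cmd cmd = {
			.num_temps = cpu_to_le32(1),
			.thresholds[0] = cpu_to_le16(celsius),
		};

		return iwl_mvm_send_cmd_pdu(mvm,
					    WIDE_ID(PHY_OPS_GROUP,
						    TEMP_REPORTING_THRESHOLDS_CMD),
					    0, sizeof(cmd), &cmd);
	}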
@@ -1858,6 +1953,31 @@ struct iwl_shared_mem_cfg {
 	__le32 page_buff_size;
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */

+/**
+ * VHT MU-MIMO group configuration
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of station in a group. If the station is in
+ *	the group then bits (group * 2) is the position -1
+ */
+struct iwl_mu_group_mgmt_cmd {
+	__le32 reserved;
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
+
+/**
+ * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of station in a group. If the station is in
+ *	the group then bits (group * 2) is the position -1
+ */
+struct iwl_mu_group_mgmt_notif {
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
+
+#define MAX_STORED_BEACON_SIZE 600
+
 /**
@@ -121,12 +121,12 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)

 	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
 		cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
-	memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));

 	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 }

-static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+void iwl_free_fw_paging(struct iwl_mvm *mvm)
 {
 	int i;

@@ -146,6 +146,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
			   get_order(mvm->fw_paging_db[i].fw_paging_size));
 	}
+	kfree(mvm->trans->paging_download_buf);
+	mvm->trans->paging_download_buf = NULL;

 	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }

@@ -537,7 +539,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	struct iwl_sf_region st_fwrd_space;

 	if (ucode_type == IWL_UCODE_REGULAR &&
-	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
+	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
+	    !(fw_has_capa(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
 		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
 	else
 		fw = iwl_get_ucode_image(mvm, ucode_type);

@@ -952,8 +956,26 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 	}

+#ifdef CONFIG_THERMAL
+	if (iwl_mvm_is_tt_in_fw(mvm)) {
+		/* in order to give the responsibility of ct-kill and
+		 * TX backoff to FW we need to send empty temperature reporting
+		 * cmd during init time
+		 */
+		iwl_mvm_send_temp_report_ths_cmd(mvm);
+	} else {
+		/* Initialize tx backoffs to the minimal possible */
+		iwl_mvm_tt_tx_backoff(mvm, 0);
+	}
+
+	/* TODO: read the budget from BIOS / Platform NVM */
+	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
+		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+					   mvm->cooling_dev.cur_state);
+#else
 	/* Initialize tx backoffs to the minimal possible */
 	iwl_mvm_tt_tx_backoff(mvm, 0);
+#endif

 	WARN_ON(iwl_mvm_config_ltr(mvm));

@@ -989,7 +1011,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 	return 0;
 error:
-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);
 	return ret;
 }

@@ -1033,7 +1055,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)

 	return 0;
 error:
-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);
 	return ret;
 }
@@ -1484,6 +1484,8 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 	/* update rx_status according to the notification's metadata */
 	memset(&rx_status, 0, sizeof(rx_status));
 	rx_status.mactime = le64_to_cpu(sb->tsf);
+	/* TSF as indicated by the firmware is at INA time */
+	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
 	rx_status.device_timestamp = le32_to_cpu(sb->system_time);
 	rx_status.band =
 		(sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -69,7 +70,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
 #include <linux/if_arp.h>
-#include <linux/devcoredump.h>
 #include <linux/time.h>
 #include <net/mac80211.h>
 #include <net/ieee80211_radiotap.h>

@@ -85,7 +85,6 @@
 #include "testmode.h"
 #include "iwl-fw-error-dump.h"
 #include "iwl-prph.h"
-#include "iwl-csr.h"
 #include "iwl-nvm-parse.h"
 #include "fw-dbg.h"

@@ -611,6 +610,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
			    IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

+	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_RRM);
+
 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

 #ifdef CONFIG_PM_SLEEP

@@ -847,6 +848,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 	u16 tid = params->tid;
 	u16 *ssn = &params->ssn;
 	u8 buf_size = params->buf_size;
+	bool amsdu = params->amsdu;

 	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
		     sta->addr, tid, action);

@@ -907,7 +909,8 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
-		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
+		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
+					      buf_size, amsdu);
 		break;
 	default:
 		WARN_ON_ONCE(1);

@@ -969,7 +972,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
	 */
 	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);

-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);

 	mvm->scan_status = 0;
 	mvm->ps_disabled = false;

@@ -1138,7 +1141,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
	 */
 	flush_work(&mvm->roc_done_wk);

-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);

 	iwl_mvm_async_handlers_purge(mvm);
 	/* async_handlers_list is empty and will stay empty: HW is stopped */

@@ -1169,8 +1172,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
			mvm->scan_uid_status[i] = 0;
		}
 	}
-
-	mvm->ucode_loaded = false;
 }

 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)

@@ -1762,6 +1763,50 @@ static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
 }
 #endif

+static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_mu_group_mgmt_cmd cmd = {};
+
+	memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
+	       WLAN_MEMBERSHIP_LEN);
+	memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
+	       WLAN_USER_POSITION_LEN);
+
+	return iwl_mvm_send_cmd_pdu(mvm,
+				    WIDE_ID(DATA_PATH_GROUP,
+					    UPDATE_MU_GROUPS_CMD),
+				    0, sizeof(cmd), &cmd);
+}
+
+static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
+					   struct ieee80211_vif *vif)
+{
+	if (vif->mu_mimo_owner) {
+		struct iwl_mu_group_mgmt_notif *notif = _data;
+
+		/*
+		 * MU-MIMO Group Id action frame is little endian. We treat
+		 * the data received from firmware as if it came from the
+		 * action frame, so no conversion is needed.
+		 */
+		ieee80211_update_mu_groups(vif,
+					   (u8 *)&notif->membership_status,
+					   (u8 *)&notif->user_position);
+	}
+}
+
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+			       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_mu_mimo_iface_iterator, notif);
+}
+
 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
					     struct ieee80211_vif *vif,
					     struct ieee80211_bss_conf *bss_conf,

@@ -1870,6 +1915,18 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
					    vif->addr);
 	}

+	/*
+	 * The firmware tracks the MU-MIMO group on its own.
+	 * However, on HW restart we should restore this data.
+	 */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+	    (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
+		ret = iwl_mvm_update_mu_groups(mvm, vif);
+		if (ret)
+			IWL_ERR(mvm,
+				"failed to update VHT MU_MIMO groups\n");
+	}
+
 	iwl_mvm_recalc_multicast(mvm);
 	iwl_mvm_configure_bcast_filter(mvm);

@@ -1896,7 +1953,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 	}

-	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
+		       /*
+			* Send power command on every beacon change,
+			* because we may have not enabled beacon abort yet.
+			*/
+		       BSS_CHANGED_BEACON_INFO)) {
 		ret = iwl_mvm_power_update_mac(mvm);
 		if (ret)
 			IWL_ERR(mvm, "failed to update power mode\n");

@@ -2083,7 +2145,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
				bss_conf->txpower);
 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
 	}
-
 }

 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,

@@ -2276,6 +2337,11 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
 		return;

+	if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) {
+		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+		return;
+	}
+
 	if (iwlwifi_mod_params.uapsd_disable) {
 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
 		return;
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -33,6 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -71,6 +73,10 @@
 #include <linux/leds.h>
 #include <linux/in6.h>

+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+#endif
+
 #include "iwl-op-mode.h"
 #include "iwl-trans.h"
 #include "iwl-notif-wait.h"

@@ -487,6 +493,12 @@ enum iwl_mvm_scan_type {
 	IWL_SCAN_TYPE_FRAGMENTED,
 };

+enum iwl_mvm_sched_scan_pass_all_states {
+	SCHED_SCAN_PASS_ALL_DISABLED,
+	SCHED_SCAN_PASS_ALL_ENABLED,
+	SCHED_SCAN_PASS_ALL_FOUND,
+};
+
 /**
 * struct iwl_nvm_section - describes an NVM section in memory.
 *

@@ -517,6 +529,30 @@ struct iwl_mvm_tt_mgmt {
 	bool throttle;
 };

+#ifdef CONFIG_THERMAL
+/**
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @temp_trips: temperature thresholds for report
+ * @fw_trips_index: keep indexes to original array - temp_trips
+ * @tzone: thermal zone device data
+ */
+struct iwl_mvm_thermal_device {
+	s16 temp_trips[IWL_MAX_DTS_TRIPS];
+	u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
+	struct thermal_zone_device *tzone;
+};
+
+/*
+ * iwl_mvm_cooling_device
+ * @cur_state: current state in milliwatts
+ * @cdev: struct thermal cooling device
+ */
+struct iwl_mvm_cooling_device {
+	u32 cur_state;
+	struct thermal_cooling_device *cdev;
+};
+#endif
+
 #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8

 struct iwl_mvm_frame_stats {

@@ -657,6 +693,7 @@ struct iwl_mvm {
 	void *scan_cmd;
 	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 	enum iwl_mvm_scan_type scan_type;
+	enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;

 	/* max number of simultaneous scans the FW supports */
 	unsigned int max_scans;

@@ -797,6 +834,11 @@ struct iwl_mvm {

 	/* Thermal Throttling and CTkill */
 	struct iwl_mvm_tt_mgmt thermal_throttle;
+#ifdef CONFIG_THERMAL
+	struct iwl_mvm_thermal_device tz_device;
+	struct iwl_mvm_cooling_device cooling_dev;
+#endif
+
 	s32 temperature;	/* Celsius */
	/*
	 * Debug option to set the NIC temperature. This option makes the

@@ -819,6 +861,7 @@ struct iwl_mvm {

 	/* Indicate if device power save is allowed */
 	u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
+	unsigned int max_amsdu_len; /* used for debugfs only */

 	struct ieee80211_vif __rcu *csa_vif;
 	struct ieee80211_vif __rcu *csa_tx_blocked_vif;

@@ -943,8 +986,9 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)

 static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
 {
-	return fw_has_capa(&mvm->fw->ucode_capa,
-			   IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
+	/* Make sure DQA isn't allowed in driver until feature is complete */
+	return false && fw_has_capa(&mvm->fw->ucode_capa,
+				    IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
 }

 static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
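[Editor's note: the "return false && ..." in iwl_mvm_is_dqa_supported() is deliberate — it keeps the DQA code paths compiling (so they cannot bit-rot) while guaranteeing they are never taken until the infrastructure lands. An equivalent, more explicit spelling of the same guard, as a hypothetical illustration:]

	static inline bool example_dqa_gate(struct iwl_mvm *mvm)
	{
		bool fw_ok = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_DQA_SUPPORT);

		/* driver-side DQA support incomplete: force-disable for now */
		return false && fw_ok;
	}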
@@ -1028,6 +1072,28 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
			   IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
 }

+static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_THERMAL
+	/* these two TLV are redundant since the responsibility to CT-kill by
+	 * FW happens only after we send at least one command of
+	 * temperature THs report.
+	 */
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
+	       fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
+#else /* CONFIG_THERMAL */
+	return false;
+#endif /* CONFIG_THERMAL */
+}
+
+static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];

 struct iwl_rate_info {

@@ -1160,6 +1226,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
			struct iwl_rx_cmd_buffer *rxb, int queue);
 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb, int queue);
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+			    const u8 *data, u32 count);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+			    int queue);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,

@@ -1203,6 +1273,10 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
				     struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
				    struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+			       struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,

@@ -1244,6 +1318,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
					      struct iwl_rx_cmd_buffer *rxb);

+/* Paging */
+void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);

@@ -1476,32 +1553,30 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }

-static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
-					  int mac80211_queue, int fifo,
-					  int sta_id, int tid, int frame_limit,
-					  u16 ssn, unsigned int wdg_timeout)
+static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
-	struct iwl_trans_txq_scd_cfg cfg = {
-		.fifo = fifo,
-		.sta_id = sta_id,
-		.tid = tid,
-		.frame_limit = frame_limit,
-		.aggregate = true,
-	};
-
-	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
+	mvm->ucode_loaded = false;
+	iwl_trans_stop_device(mvm->trans);
 }

+/* Stop/start all mac queues in a given bitmap */
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
 void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
			struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
-void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm);
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);

 /* Location Aware Regulatory */
 struct iwl_mcc_update_resp *
@@ -236,6 +236,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 	RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
		   iwl_mvm_rx_ant_coupling_notif, true),

+	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
+		   iwl_mvm_window_status_notif, false),
+
 	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
 	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),

@@ -263,6 +266,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
 	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
		       iwl_mvm_temp_notif, true),
+	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+		       iwl_mvm_ct_kill_notif, false),

 	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   true),

@@ -270,6 +275,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
 	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
		       iwl_mvm_rx_stored_beacon_notif, false),
+	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+		       iwl_mvm_mu_mimo_grp_notif, false),
 };
 #undef RX_HANDLER

@@ -292,6 +299,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
 	HCMD_NAME(SCAN_COMPLETE_UMAC),
 	HCMD_NAME(TOF_CMD),
 	HCMD_NAME(TOF_NOTIFICATION),
+	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
 	HCMD_NAME(ADD_STA_KEY),
 	HCMD_NAME(ADD_STA),
 	HCMD_NAME(REMOVE_STA),

@@ -387,9 +395,22 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
 */
 static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
 	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+	HCMD_NAME(CTDP_CONFIG_CMD),
+	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+	HCMD_NAME(CT_KILL_NOTIFICATION),
 	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
 };

+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
+	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+	HCMD_NAME(RX_QUEUES_NOTIFICATION),
+};
+
 /* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */

@@ -401,6 +422,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
 	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
+	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
 };

@@ -474,8 +496,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,

 	if (iwl_mvm_has_new_rx_api(mvm)) {
 		op_mode->ops = &iwl_mvm_ops_mq;
+		trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
 	} else {
 		op_mode->ops = &iwl_mvm_ops;
+		trans->rx_mpdu_cmd_hdr_size =
+			sizeof(struct iwl_rx_mpdu_res_start);

 		if (WARN_ON(trans->num_rx_queues > 1))
 			goto out_free;

@@ -567,7 +592,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	iwl_trans_configure(mvm->trans, &trans_cfg);

 	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
-	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
 	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
 	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
 	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,

@@ -588,7 +612,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		 mvm->cfg->name, mvm->trans->hw_rev);

 	min_backoff = calc_min_backoff(trans, cfg);
-	iwl_mvm_tt_initialize(mvm, min_backoff);
+	iwl_mvm_thermal_initialize(mvm, min_backoff);

 	if (iwlwifi_mod_params.nvm_file)
 		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;

@@ -619,7 +643,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
 	err = iwl_run_init_mvm_ucode(mvm, true);
 	if (!err || !iwlmvm_mod_params.init_dbg)
-		iwl_trans_stop_device(trans);
+		iwl_mvm_stop_device(mvm);
 	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
 	mutex_unlock(&mvm->mutex);
 	/* returns 0 if successful, 1 if success but in rfkill */

@@ -648,19 +672,22 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,

 	memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

-	/* rpm starts with a taken reference, we can release it now */
-	iwl_trans_unref(mvm->trans);
+	/* The transport always starts with a taken reference, we can
+	 * release it now if d0i3 is supported */
+	if (iwl_mvm_is_d0i3_supported(mvm))
+		iwl_trans_unref(mvm->trans);

 	iwl_mvm_tof_init(mvm);

 	/* init RSS hash key */
-	get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key));
+	get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));

 	return op_mode;

 out_unregister:
 	ieee80211_unregister_hw(mvm->hw);
 	iwl_mvm_leds_exit(mvm);
+	iwl_mvm_thermal_exit(mvm);
 out_free:
 	flush_delayed_work(&mvm->fw_dump_wk);
 	iwl_phy_db_free(mvm->phy_db);

@@ -676,9 +703,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	int i;

+	/* If d0i3 is supported, we have released the reference that
+	 * the transport started with, so we should take it back now
+	 * that we are leaving.
+	 */
+	if (iwl_mvm_is_d0i3_supported(mvm))
+		iwl_trans_ref(mvm->trans);
+
 	iwl_mvm_leds_exit(mvm);

-	iwl_mvm_tt_exit(mvm);
+	iwl_mvm_thermal_exit(mvm);

 	ieee80211_unregister_hw(mvm->hw);

@@ -699,6 +733,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);

+	iwl_free_fw_paging(mvm);
+
 	iwl_mvm_tof_clean(mvm);

 	ieee80211_free_hw(mvm->hw);

@@ -856,28 +892,24 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
 	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
 		iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
+	else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
+			  pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
+		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
 	else
 		iwl_mvm_rx_common(mvm, rxb, pkt);
 }

-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
 {
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	unsigned long mq;
 	int q;

-	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
-	spin_unlock_bh(&mvm->queue_info_lock);
-
 	if (WARN_ON_ONCE(!mq))
 		return;

 	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
 		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
 			IWL_DEBUG_TX_QUEUES(mvm,
-					    "queue %d (mac80211 %d) already stopped\n",
-					    queue, q);
+					    "mac80211 %d already stopped\n", q);
 			continue;
 		}

@@ -897,24 +929,29 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 	iwl_trans_block_txq_ptrs(mvm->trans, false);
 }

-static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	unsigned long mq;
-	int q;

 	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
 	spin_unlock_bh(&mvm->queue_info_lock);

+	iwl_mvm_stop_mac_queues(mvm, mq);
+}
+
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+	int q;
+
 	if (WARN_ON_ONCE(!mq))
 		return;

 	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
 		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
 			IWL_DEBUG_TX_QUEUES(mvm,
-					    "queue %d (mac80211 %d) still stopped\n",
-					    queue, q);
+					    "mac80211 %d still stopped\n", q);
 			continue;
 		}

@@ -922,6 +959,18 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
 	}
 }

+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	unsigned long mq;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	iwl_mvm_start_mac_queues(mvm, mq);
+}
+
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 {
 	if (state)

@@ -1528,6 +1577,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,

 	if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
 		iwl_mvm_rx_frame_release(mvm, rxb, queue);
+	else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
+			  pkt->hdr.group_id == DATA_PATH_GROUP))
+		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
 	else
 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
 }
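[Editor's note: why the per-mac80211-queue stop counts above are atomics — several HW queues can map to the same mac80211 queue, so it may be stopped more than once and must only wake on the last wake. A toy illustration with hypothetical names, not driver code:]

	static void example_queue_stop_refcount(void)
	{
		atomic_t stop_count = ATOMIC_INIT(0);

		atomic_inc(&stop_count);	/* HW queue A stops the mac queue */
		atomic_inc(&stop_count);	/* HW queue B stops it again */
		atomic_dec(&stop_count);	/* A wakes: count 1, stay stopped */
		if (atomic_dec_return(&stop_count) == 0)
			pr_info("mac queue actually woken\n");
	}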
@@ -556,6 +556,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate)
 	if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX))
 		rate_str = legacy_rates[rate->index];
 	else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) &&
+		 (rate->index >= IWL_RATE_MCS_0_INDEX) &&
 		 (rate->index <= IWL_RATE_MCS_9_INDEX))
 		rate_str = ht_vht_rates[rate->index];
 	else
@@ -1672,6 +1673,20 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 	}
 }

+static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			     struct iwl_scale_tbl_info *tbl,
+			     enum rs_action scale_action)
+{
+	struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
+
+	if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
+	    tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
+	    scale_action == RS_ACTION_DOWNSCALE)
+		sta_priv->tlc_amsdu = false;
+	else
+		sta_priv->tlc_amsdu = true;
+}
+
 /*
  * setup rate table in uCode
  */
@@ -2415,6 +2430,7 @@ lq_update:
 		tbl->rate.index = index;
 		if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
 			rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
+		rs_set_amsdu_len(mvm, sta, tbl, scale_action);
 		rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
 	}

@@ -3098,6 +3114,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	sband = hw->wiphy->bands[band];

 	lq_sta->lq.sta_id = sta_priv->sta_id;
+	sta_priv->tlc_amsdu = false;

 	for (j = 0; j < LQ_SIZE; j++)
 		rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);

@@ -3657,10 +3674,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 	ssize_t ret;

 	struct iwl_lq_sta *lq_sta = file->private_data;
+	struct iwl_mvm_sta *mvmsta =
+		container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
 	struct iwl_mvm *mvm;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct rs_rate *rate = &tbl->rate;
 	u32 ss_params;

 	mvm = lq_sta->pers.drv;
 	buff = kmalloc(2048, GFP_KERNEL);
 	if (!buff)
@@ -3686,10 +3706,11 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 				(is_ht20(rate)) ? "20MHz" :
 				(is_ht40(rate)) ? "40MHz" :
 				(is_ht80(rate)) ? "80Mhz" : "BAD BW");
-		desc += sprintf(buff + desc, " %s %s %s\n",
+		desc += sprintf(buff + desc, " %s %s %s %s\n",
 				(rate->sgi) ? "SGI" : "NGI",
 				(rate->ldpc) ? "LDPC" : "BCC",
-				(lq_sta->is_agg) ? "AGG on" : "");
+				(lq_sta->is_agg) ? "AGG on" : "",
+				(mvmsta->tlc_amsdu) ? "AMSDU on" : "");
 	}
 	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
 			lq_sta->last_rate_n_flags);
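rs_set_amsdu_len() above gates TLC A-MSDU on the link being good enough. A rough standalone restatement of the rule, treating IWL_RATE_MCS_5_INDEX simply as MCS 5 for illustration (the real index constants live in the driver's rate tables):

#include <stdbool.h>
#include <stdio.h>

enum rs_action { RS_ACTION_STAY, RS_ACTION_UPSCALE, RS_ACTION_DOWNSCALE };

/* A-MSDU stays on only for HT/VHT rates at MCS >= 5 while the scaler is
 * not stepping the rate down, mirroring rs_set_amsdu_len() */
static bool tlc_allow_amsdu(bool ht_or_vht, int mcs, enum rs_action act)
{
	return ht_or_vht && mcs >= 5 && act != RS_ACTION_DOWNSCALE;
}

int main(void)
{
	printf("%d\n", tlc_allow_amsdu(true, 7, RS_ACTION_STAY));	/* 1 */
	printf("%d\n", tlc_allow_amsdu(true, 7, RS_ACTION_DOWNSCALE));	/* 0 */
	printf("%d\n", tlc_allow_amsdu(false, 7, RS_ACTION_STAY));	/* 0 */
	return 0;
}
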
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -322,11 +323,9 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 	rx_status->freq =
 		ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
 					       rx_status->band);
-	/*
-	 * TSF as indicated by the fw is at INA time, but mac80211 expects the
-	 * TSF at the beginning of the MPDU.
-	 */
-	/*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
-
+	/* TSF as indicated by the firmware is at INA time */
+	rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

 	iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);

@@ -448,6 +447,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 	iwl_mvm_update_frame_stats(mvm, rate_n_flags,
 				   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 #endif
+
+	if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+		      ieee80211_is_probe_resp(hdr->frame_control)) &&
+		     mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
 	iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
 					crypt_len, rxb);
 }
@@ -622,3 +627,51 @@ void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
 }
+
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
+	int i;
+	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ONCE(pkt_len != sizeof(*notif),
+		      "Received window status notification of wrong size (%u)\n",
+		      pkt_len))
+		return;
+
+	rcu_read_lock();
+	for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
+		struct ieee80211_sta *sta;
+		u8 sta_id, tid;
+		u64 bitmap;
+		u32 ssn;
+		u16 ratid;
+		u16 received_mpdu;
+
+		ratid = le16_to_cpu(notif->ra_tid[i]);
+		/* check that this TID is valid */
+		if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
+			continue;
+
+		received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
+		if (received_mpdu == 0)
+			continue;
+
+		tid = ratid & BA_WINDOW_STATUS_TID_MSK;
+		/* get the station */
+		sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
+			 >> BA_WINDOW_STATUS_STA_ID_POS;
+		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+		bitmap = le64_to_cpu(notif->bitmap[i]);
+		ssn = le32_to_cpu(notif->start_seq_num[i]);
+
+		/* update mac80211 with the bitmap for the reordering buffer */
+		ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
+						     received_mpdu);
+	}
+	rcu_read_unlock();
+}
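The new window-status handler unpacks a station id, a TID and a valid bit from the packed ra_tid word. A toy extraction with invented masks — the real BA_WINDOW_STATUS_* layout lives in the firmware API headers, so the bit positions below are only an example:

#include <stdint.h>
#include <stdio.h>

/* illustrative layout only: TID in bits 0-3, station id in bits 4-11,
 * valid flag in bit 15 */
#define WIN_TID_MSK	0x000f
#define WIN_STA_MSK	0x0ff0
#define WIN_STA_POS	4
#define WIN_VALID_MSK	0x8000

int main(void)
{
	uint16_t ratid = 0x8073;	/* valid, sta_id 7, tid 3 */

	if (ratid & WIN_VALID_MSK)
		printf("sta %u tid %u\n",
		       (ratid & WIN_STA_MSK) >> WIN_STA_POS,
		       ratid & WIN_TID_MSK);
	return 0;
}
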
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -156,7 +156,14 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
 			       u16 len, u8 crypt_len,
 			       struct iwl_rx_cmd_buffer *rxb)
 {
-	unsigned int hdrlen, fraglen;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+	unsigned int headlen, fraglen, pad_len = 0;
+	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
+		pad_len = 2;
+	len -= pad_len;

 	/* If frame is small enough to fit in skb->head, pull it completely.
 	 * If not, only pull ieee80211_hdr (including crypto if present, and
@@ -170,14 +177,23 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
 	 * If the latter changes (there are efforts in the standards group
 	 * to do so) we should revisit this and ieee80211_data_to_8023().
 	 */
-	hdrlen = (len <= skb_tailroom(skb)) ? len :
-					      sizeof(*hdr) + crypt_len + 8;
+	headlen = (len <= skb_tailroom(skb)) ? len :
+					       hdrlen + crypt_len + 8;
+
+	/* The firmware may align the packet to DWORD.
+	 * The padding is inserted after the IV.
+	 * After copying the header + IV skip the padding if
+	 * present before copying packet data.
+	 */
+	hdrlen += crypt_len;
 	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
-	fraglen = len - hdrlen;
+	memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
+	       headlen - hdrlen);
+
+	fraglen = len - headlen;

 	if (fraglen) {
-		int offset = (void *)hdr + hdrlen -
+		int offset = (void *)hdr + headlen + pad_len -
 			     rxb_addr(rxb) + rxb_offset(rxb);

 		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
@@ -285,6 +301,114 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }

+/*
+ * returns true if a packet outside BA session is a duplicate and
+ * should be dropped
+ */
+static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
+				  struct ieee80211_rx_status *rx_status,
+				  struct ieee80211_hdr *hdr,
+				  struct iwl_rx_mpdu_desc *desc)
+{
+	struct iwl_mvm_sta *mvm_sta;
+	struct iwl_mvm_rxq_dup_data *dup_data;
+	u8 baid, tid, sub_frame_idx;
+
+	if (WARN_ON(IS_ERR_OR_NULL(sta)))
+		return false;
+
+	baid = (le32_to_cpu(desc->reorder_data) &
+		IWL_RX_MPDU_REORDER_BAID_MASK) >>
+		IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+		return false;
+
+	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	dup_data = &mvm_sta->dup_data[queue];
+
+	/*
+	 * Drop duplicate 802.11 retransmissions
+	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+	 */
+	if (ieee80211_is_ctl(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+	    is_multicast_ether_addr(hdr->addr1)) {
+		rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+		return false;
+	}
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		/* frame has qos control */
+		tid = *ieee80211_get_qos_ctl(hdr) &
+			IEEE80211_QOS_CTL_TID_MASK;
+	else
+		tid = IWL_MAX_TID_COUNT;
+
+	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+	sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
+		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
+		return true;
+
+	dup_data->last_seq[tid] = hdr->seq_ctrl;
+	dup_data->last_sub_frame[tid] = sub_frame_idx;
+
+	rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+	return false;
+}
+
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+			    const u8 *data, u32 count)
+{
+	struct iwl_rxq_sync_cmd *cmd;
+	u32 data_size = sizeof(*cmd) + count;
+	int ret;
+
+	/* should be DWORD aligned */
+	if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
+		return -EINVAL;
+
+	cmd = kzalloc(data_size, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->rxq_mask = cpu_to_le32(rxq_mask);
+	cmd->count = cpu_to_le32(count);
+	cmd->flags = 0;
+	memcpy(cmd->payload, data, count);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   WIDE_ID(DATA_PATH_GROUP,
+					   TRIGGER_RX_QUEUES_NOTIF_CMD),
+				   0, data_size, cmd);
+
+	kfree(cmd);
+	return ret;
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+			    int queue)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_rxq_sync_notification *notif;
+	struct iwl_mvm_internal_rxq_notif *internal_notif;
+
+	notif = (void *)pkt->data;
+	internal_notif = (void *)notif->payload;
+
+	switch (internal_notif->type) {
+	case IWL_MVM_RXQ_NOTIF_DEL_BA:
+		/* TODO */
+		break;
+	default:
+		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
+	}
+}
+
 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			struct iwl_rx_cmd_buffer *rxb, int queue)
 {
@@ -332,6 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
 							 rx_status->band);
 	iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+	/* TSF as indicated by the firmware is at INA time */
+	rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

 	rcu_read_lock();

@@ -387,6 +513,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,

 		if (ieee80211_is_data(hdr->frame_control))
 			iwl_mvm_rx_csum(sta, skb, desc);
+
+		if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
+			kfree_skb(skb);
+			rcu_read_unlock();
+			return;
+		}
 	}

 	/*
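The duplicate check added in rxmq.c keeps, per station and per RX queue, the last seen sequence number and A-MSDU subframe index for each TID. Stripped of the driver types, the core test looks roughly like this (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MAX_TID 8

/* per (station, rx queue) record, like struct iwl_mvm_rxq_dup_data */
struct dup_data {
	uint16_t last_seq[MAX_TID + 1];	/* +1 slot for non-QoS frames */
	uint8_t  last_sub_frame[MAX_TID + 1];
};

/* a retried frame that repeats the last seen sequence number without
 * advancing the A-MSDU subframe index is a duplicate */
static bool is_dup(struct dup_data *dd, int tid, bool retry,
		   uint16_t seq_ctrl, uint8_t sub_frame_idx)
{
	if (retry && dd->last_seq[tid] == seq_ctrl &&
	    dd->last_sub_frame[tid] >= sub_frame_idx)
		return true;

	dd->last_seq[tid] = seq_ctrl;
	dd->last_sub_frame[tid] = sub_frame_idx;
	return false;
}

int main(void)
{
	struct dup_data dd = {0};

	is_dup(&dd, 0, false, 0x10, 0);		/* records the frame */
	return is_dup(&dd, 0, true, 0x10, 0);	/* retry of same seq: dup */
}
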
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -297,6 +299,12 @@ void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
 		       iwl_mvm_dump_channel_list(notif->results,
 						 notif->scanned_channels, buf,
 						 sizeof(buf)));
+
+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+		ieee80211_sched_scan_results(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+	}
 }

 void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
@@ -380,6 +388,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,

 		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 		ieee80211_sched_scan_stopped(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
 		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
 			       aborted ? "aborted" : "completed",
@@ -533,10 +542,13 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
 		IWL_DEBUG_SCAN(mvm,
 			       "Sending scheduled scan with filtering, n_match_sets %d\n",
 			       req->n_match_sets);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 		return false;
 	}

 	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
+
+	mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
 	return true;
 }

@@ -788,6 +800,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
 #endif

+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+
 	if (iwl_mvm_is_regular_scan(params) &&
 	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
 	    params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -1074,6 +1089,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
 #endif

+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
 	if (iwl_mvm_is_regular_scan(params) &&
 	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
 	    params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -1301,10 +1319,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 		return -EBUSY;
 	}

-	/* we don't support "match all" in the firmware */
-	if (!req->n_match_sets)
-		return -EOPNOTSUPP;
-
 	ret = iwl_mvm_check_running_scans(mvm, type);
 	if (ret)
 		return ret;
@@ -1400,6 +1414,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
 		ieee80211_sched_scan_stopped(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 	}

 	mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -1434,6 +1449,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
 		       iwl_mvm_dump_channel_list(notif->results,
 						 notif->scanned_channels, buf,
 						 sizeof(buf)));
+
+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+		ieee80211_sched_scan_results(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+	}
 }

 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
@@ -1528,6 +1549,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
 		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
 		if (uid >= 0 && !mvm->restart_fw) {
 			ieee80211_sched_scan_stopped(mvm->hw);
+			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 			mvm->scan_uid_status[uid] = 0;
 		}

@@ -1549,8 +1571,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
 		 * restart_hw, so do not report if FW is about to be
 		 * restarted.
 		 */
-		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
+		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
+		    !mvm->restart_fw) {
 			ieee80211_sched_scan_stopped(mvm->hw);
+			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+		}
 	}
 }

@@ -1586,6 +1611,7 @@ out:
 		ieee80211_scan_completed(mvm->hw, true);
 	} else if (notify) {
 		ieee80211_sched_scan_stopped(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 	}

 	return ret;
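The sched_scan_pass_all changes above form a small three-state machine: DISABLED when match sets filter in firmware, ENABLED when every iteration should be considered, and FOUND once a beacon or probe response arrived, which triggers exactly one report on the next iteration-complete notification. A compact model of that flow (a standalone sketch, not the driver's code):

#include <stdio.h>

enum pass_all { PASS_ALL_DISABLED, PASS_ALL_ENABLED, PASS_ALL_FOUND };

static enum pass_all state = PASS_ALL_ENABLED;

/* Rx path: remember that something was heard during this iteration */
static void on_beacon_or_probe_resp(void)
{
	if (state == PASS_ALL_ENABLED)
		state = PASS_ALL_FOUND;
}

/* iteration-complete notification: only report if something was found */
static void on_iter_complete(void)
{
	if (state == PASS_ALL_FOUND) {
		printf("reporting sched scan results\n");
		state = PASS_ALL_ENABLED;
	}
}

int main(void)
{
	on_iter_complete();		/* nothing heard: no report */
	on_beacon_or_probe_resp();
	on_iter_complete();		/* reports once, then re-arms */
	return 0;
}
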
@@ -280,6 +280,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_rxq_dup_data *dup_data;
 	int i, ret, sta_id;

 	lockdep_assert_held(&mvm->mutex);
@@ -327,6 +328,16 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	}
 	mvm_sta->agg_tids = 0;

+	if (iwl_mvm_has_new_rx_api(mvm) &&
+	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		dup_data = kcalloc(mvm->trans->num_rx_queues,
+				   sizeof(*dup_data),
+				   GFP_KERNEL);
+		if (!dup_data)
+			return -ENOMEM;
+		mvm_sta->dup_data = dup_data;
+	}
+
 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
 	if (ret)
 		goto err;
@@ -508,6 +519,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,

 	lockdep_assert_held(&mvm->mutex);

+	if (iwl_mvm_has_new_rx_api(mvm))
+		kfree(mvm_sta->dup_data);
+
 	if (vif->type == NL80211_IFTYPE_STATION &&
 	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@@ -1031,15 +1045,23 @@ release_locks:
 }

 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			    struct ieee80211_sta *sta, u16 tid, u8 buf_size)
+			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+			    bool amsdu)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 	unsigned int wdg_timeout =
 		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
-	int queue, fifo, ret;
+	int queue, ret;
 	u16 ssn;

+	struct iwl_trans_txq_scd_cfg cfg = {
+		.sta_id = mvmsta->sta_id,
+		.tid = tid,
+		.frame_limit = buf_size,
+		.aggregate = true,
+	};
+
 	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
 		     != IWL_MAX_TID_COUNT);

@@ -1051,13 +1073,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	tid_data->state = IWL_AGG_ON;
 	mvmsta->agg_tids |= BIT(tid);
 	tid_data->ssn = 0xffff;
+	tid_data->amsdu_in_ampdu_allowed = amsdu;
 	spin_unlock_bh(&mvmsta->lock);

-	fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

-	iwl_mvm_enable_agg_txq(mvm, queue,
-			       vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
-			       mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
+	iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
+			   ssn, &cfg, wdg_timeout);

 	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 	if (ret)
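iwl_mvm_add_sta() now sizes the duplicate-detection state by the number of RX queues, so each queue updates its own record without locking. A minimal sketch of that allocation pattern (dup_data layout simplified, names invented):

#include <stdlib.h>

struct dup_data {
	unsigned short last_seq[9];
	unsigned char last_sub_frame[9];
};

/* one record per RX queue; a zeroed allocation makes "sequence 0,
 * subframe 0" the initial state, which is what kcalloc() gives the
 * driver in the kernel */
static struct dup_data *alloc_dup_data(unsigned int num_rx_queues)
{
	return calloc(num_rx_queues, sizeof(struct dup_data));
}

int main(void)
{
	struct dup_data *dd = alloc_dup_data(4);	/* e.g. 4 RX queues */

	free(dd);
	return 0;
}
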
@@ -258,8 +258,7 @@ enum iwl_mvm_agg_state {
 *	This is basically (last acked packet++).
 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
 *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
- * @reduced_tpc: Reduced tx power. Holds the data between the
- *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
 * @state: state of the BA agreement establishment / tear down.
 * @txq_id: Tx queue used by the BA session
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -273,7 +272,7 @@ struct iwl_mvm_tid_data {
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
 	u32 rate_n_flags;
-	u8 reduced_tpc;
+	bool amsdu_in_ampdu_allowed;
 	enum iwl_mvm_agg_state state;
 	u16 txq_id;
 	u16 ssn;
@@ -293,6 +292,16 @@ struct iwl_mvm_key_pn {
 	} ____cacheline_aligned_in_smp q[];
 };

+/**
+ * struct iwl_mvm_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwl_mvm_rxq_dup_data {
+	__le16 last_seq[IWL_MAX_TID_COUNT + 1];
+	u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
 /**
 * struct iwl_mvm_sta - representation of a station in the driver
 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -311,6 +320,7 @@ struct iwl_mvm_key_pn {
 * @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: is thermal throttling enable Tx protection?
 * @disable_tx: is tx to this STA disabled?
+ * @tlc_amsdu: true if A-MSDU is allowed
 * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
 * @sleep_tx_count: the number of frames that we told the firmware to let out
 *	even when that station is asleep. This is useful in case the queue
@@ -318,6 +328,7 @@ struct iwl_mvm_key_pn {
 *	we are sending frames from an AMPDU queue and there was a hole in
 *	the BA window. To be used for UAPSD only.
 * @ptk_pn: per-queue PTK PN data structures
+ * @dup_data: per queue duplicate packet detection data
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
@@ -337,14 +348,15 @@ struct iwl_mvm_sta {
 	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
 	struct iwl_lq_sta lq_sta;
 	struct ieee80211_vif *vif;

 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
+	struct iwl_mvm_rxq_dup_data *dup_data;

 	/* Temporary, until the new TLC will control the Tx protection */
 	s8 tx_protection;
 	bool tt_tx_protection;

 	bool disable_tx;
+	bool tlc_amsdu;
 	u8 agg_tids;
 	u8 sleep_tx_count;
 };
@@ -405,7 +417,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			    struct ieee80211_sta *sta, u16 tid, u8 buf_size);
+			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+			    bool amsdu);
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, u16 tid);
 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -64,6 +65,8 @@
 *
 *****************************************************************************/

+#include <linux/sort.h>
+
 #include "mvm.h"

 #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT	HZ
@@ -79,8 +82,10 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
 	IWL_ERR(mvm, "Enter CT Kill\n");
 	iwl_mvm_set_hw_ctkill_state(mvm, true);

-	tt->throttle = false;
-	tt->dynamic_smps = false;
+	if (!iwl_mvm_is_tt_in_fw(mvm)) {
+		tt->throttle = false;
+		tt->dynamic_smps = false;
+	}

 	/* Don't schedule an exit work if we're in test mode, since
 	 * the temperature will not change unless we manually set it
@@ -116,18 +121,21 @@ void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
 static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
 				    struct iwl_rx_packet *pkt)
 {
-	struct iwl_dts_measurement_notif *notif;
+	struct iwl_dts_measurement_notif_v1 *notif_v1;
 	int len = iwl_rx_packet_payload_len(pkt);
 	int temp;

-	if (WARN_ON_ONCE(len < sizeof(*notif))) {
+	/* we can use notif_v1 only, because v2 only adds an additional
+	 * parameter, which is not used in this function.
+	 */
+	if (WARN_ON_ONCE(len < sizeof(*notif_v1))) {
 		IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
 		return -EINVAL;
 	}

-	notif = (void *)pkt->data;
+	notif_v1 = (void *)pkt->data;

-	temp = le32_to_cpu(notif->temp);
+	temp = le32_to_cpu(notif_v1->temp);

 	/* shouldn't be negative, but since it's s32, make sure it isn't */
 	if (WARN_ON_ONCE(temp < 0))
@@ -158,17 +166,74 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
 void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_dts_measurement_notif_v2 *notif_v2;
+	int len = iwl_rx_packet_payload_len(pkt);
 	int temp;
+	u32 ths_crossed;

 	/* the notification is handled synchronously in ctkill, so skip here */
 	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
 		return;

 	temp = iwl_mvm_temp_notif_parse(mvm, pkt);
-	if (temp < 0)
+
+	if (!iwl_mvm_is_tt_in_fw(mvm)) {
+		if (temp >= 0)
+			iwl_mvm_tt_temp_changed(mvm, temp);
 		return;
+	}
+
+	if (WARN_ON_ONCE(len < sizeof(*notif_v2))) {
+		IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+		return;
+	}
+
+	notif_v2 = (void *)pkt->data;
+	ths_crossed = le32_to_cpu(notif_v2->threshold_idx);
+
+	/* 0xFF in ths_crossed means the notification is not related
+	 * to a trip, so we can ignore it here.
+	 */
+	if (ths_crossed == 0xFF)
+		return;

-	iwl_mvm_tt_temp_changed(mvm, temp);
+	IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n",
+		       temp, ths_crossed);
+
+#ifdef CONFIG_THERMAL
+	if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
+		return;
+
+	/*
+	 * We are now handling a temperature notification from the firmware
+	 * in ASYNC and hold the mutex. thermal_notify_framework will call
+	 * us back through get_temp() which ought to send a SYNC command to
+	 * the firmware and hence to take the mutex.
+	 * Avoid the deadlock by unlocking the mutex here.
+	 */
+	mutex_unlock(&mvm->mutex);
+	thermal_notify_framework(mvm->tz_device.tzone,
+				 mvm->tz_device.fw_trips_index[ths_crossed]);
+	mutex_lock(&mvm->mutex);
+#endif /* CONFIG_THERMAL */
 }

+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct ct_kill_notif *notif;
+	int len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ON_ONCE(len != sizeof(*notif))) {
+		IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n");
+		return;
+	}
+
+	notif = (struct ct_kill_notif *)pkt->data;
+	IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
+		       notif->temperature);
+
+	iwl_mvm_enter_ctkill(mvm);
+}
+
 static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
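The parse function above deliberately checks only the v1 length, because v2 merely appends threshold_idx; only the handler that reads the new field insists on the full v2 size. The same versioned-notification pattern in standalone form (struct layouts invented for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dts_notif_v1 { int32_t temp; };
struct dts_notif_v2 { int32_t temp; int32_t threshold_idx; };

/* a v1-sized check is enough when only temp is consumed; code that
 * reads threshold_idx must check against the larger v2 size */
static int parse_temp(const void *data, size_t len, int32_t *temp)
{
	if (len < sizeof(struct dts_notif_v1))
		return -1;
	*temp = ((const struct dts_notif_v1 *)data)->temp;
	return 0;
}

int main(void)
{
	struct dts_notif_v2 n = { .temp = 42, .threshold_idx = 1 };
	int32_t t;

	if (!parse_temp(&n, sizeof(n), &t))
		printf("temp = %d\n", t);
	return 0;
}
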
@@ -236,6 +301,12 @@ static void check_exit_ctkill(struct work_struct *work)
 	tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
 	mvm = container_of(tt, struct iwl_mvm, thermal_throttle);

+	if (iwl_mvm_is_tt_in_fw(mvm)) {
+		iwl_mvm_exit_ctkill(mvm);
+
+		return;
+	}
+
 	duration = tt->params.ct_kill_duration;

 	mutex_lock(&mvm->mutex);
@@ -435,7 +506,365 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
 	.support_tx_backoff = true,
 };

-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
+#ifdef CONFIG_THERMAL
+static int compare_temps(const void *a, const void *b)
+{
+	return ((s16)le16_to_cpu(*(__le16 *)a) -
+		(s16)le16_to_cpu(*(__le16 *)b));
+}
+
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
+{
+	struct temp_report_ths_cmd cmd = {0};
+	int ret, i, j, idx = 0;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* The driver holds array of temperature trips that are unsorted
+	 * and uncompressed, the FW should get it compressed and sorted
+	 */
+
+	/* compress temp_trips to cmd array, remove uninitialized values */
+	for (i = 0; i < IWL_MAX_DTS_TRIPS; i++)
+		if (mvm->tz_device.temp_trips[i] != S16_MIN) {
+			cmd.thresholds[idx++] =
+				cpu_to_le16(mvm->tz_device.temp_trips[i]);
+		}
+	cmd.num_temps = cpu_to_le32(idx);
+
+	if (!idx)
+		goto send;
+
+	/* sort cmd array */
+	sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL);
+
+	/* we should save the indexes of trips because we sort
+	 * and compress the original array
+	 */
+	for (i = 0; i < idx; i++) {
+		for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
+			if (le16_to_cpu(cmd.thresholds[i]) ==
+			    mvm->tz_device.temp_trips[j])
+				mvm->tz_device.fw_trips_index[i] = j;
+		}
+	}
+
+send:
+	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+						TEMP_REPORTING_THRESHOLDS_CMD),
+				   0, sizeof(cmd), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+			ret);
+
+	return ret;
+}
+
+static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
+				  int *temperature)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+	int ret;
+	int temp;
+
+	mutex_lock(&mvm->mutex);
+
+	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = iwl_mvm_get_temp(mvm, &temp);
+	if (ret)
+		goto out;
+
+	*temperature = temp * 1000;
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device,
+				       int trip, int *temp)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+
+	if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+		return -EINVAL;
+
+	*temp = mvm->tz_device.temp_trips[trip] * 1000;
+
+	return 0;
+}
+
+static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device,
+				       int trip, enum thermal_trip_type *type)
+{
+	if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+		return -EINVAL;
+
+	*type = THERMAL_TRIP_PASSIVE;
+
+	return 0;
+}
+
+static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
+				       int trip, int temp)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+	struct iwl_mvm_thermal_device *tzone;
+	int i, ret;
+	s16 temperature;
+
+	mutex_lock(&mvm->mutex);
+
+	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if ((temp / 1000) > S16_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	temperature = (s16)(temp / 1000);
+	tzone = &mvm->tz_device;
+
+	if (!tzone) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/* no updates */
+	if (tzone->temp_trips[trip] == temperature) {
+		ret = 0;
+		goto out;
+	}
+
+	/* already existing temperature */
+	for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+		if (tzone->temp_trips[i] == temperature) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	tzone->temp_trips[trip] = temperature;
+
+	ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+	.get_temp = iwl_mvm_tzone_get_temp,
+	.get_trip_temp = iwl_mvm_tzone_get_trip_temp,
+	.get_trip_type = iwl_mvm_tzone_get_trip_type,
+	.set_trip_temp = iwl_mvm_tzone_set_trip_temp,
+};
+
+/* make all trips writable */
+#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1)
+
+static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
+{
+	int i;
+	char name[] = "iwlwifi";
+
+	if (!iwl_mvm_is_tt_in_fw(mvm)) {
+		mvm->tz_device.tzone = NULL;
+
+		return;
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+	mvm->tz_device.tzone = thermal_zone_device_register(name,
+							IWL_MAX_DTS_TRIPS,
+							IWL_WRITABLE_TRIPS_MSK,
+							mvm, &tzone_ops,
+							NULL, 0, 0);
+	if (IS_ERR(mvm->tz_device.tzone)) {
+		IWL_DEBUG_TEMP(mvm,
+			       "Failed to register to thermal zone (err = %ld)\n",
+			       PTR_ERR(mvm->tz_device.tzone));
+		return;
+	}
+
+	/* 0 is a valid temperature,
+	 * so initialize the array with S16_MIN which is an invalid temperature
+	 */
+	for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++)
+		mvm->tz_device.temp_trips[i] = S16_MIN;
+}
+
+static const u32 iwl_mvm_cdev_budgets[] = {
+	2000,	/* cooling state 0 */
+	1800,	/* cooling state 1 */
+	1600,	/* cooling state 2 */
+	1400,	/* cooling state 3 */
+	1200,	/* cooling state 4 */
+	1000,	/* cooling state 5 */
+	900,	/* cooling state 6 */
+	800,	/* cooling state 7 */
+	700,	/* cooling state 8 */
+	650,	/* cooling state 9 */
+	600,	/* cooling state 10 */
+	550,	/* cooling state 11 */
+	500,	/* cooling state 12 */
+	450,	/* cooling state 13 */
+	400,	/* cooling state 14 */
+	350,	/* cooling state 15 */
+	300,	/* cooling state 16 */
+	250,	/* cooling state 17 */
+	200,	/* cooling state 18 */
+	150,	/* cooling state 19 */
+};
+
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget)
+{
+	struct iwl_mvm_ctdp_cmd cmd = {
+		.operation = cpu_to_le32(op),
+		.budget = cpu_to_le32(budget),
+		.window_size = 0,
+	};
+	int ret;
+	u32 status;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
+						       CTDP_CONFIG_CMD),
+					  sizeof(cmd), &cmd, &status);
+	if (ret) {
+		IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
+		return ret;
+	}
+
+	if (op == CTDP_CMD_OPERATION_START)
+		mvm->cooling_dev.cur_state = budget;
+
+	else if (op == CTDP_CMD_OPERATION_REPORT)
+		IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
+
+	return 0;
+}
+
+static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
+				       unsigned long *state)
+{
+	*state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1;
+
+	return 0;
+}
+
+static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+				       unsigned long *state)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+
+	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+		return -EBUSY;
+
+	*state = mvm->cooling_dev.cur_state;
+	return 0;
+}
+
+static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+				       unsigned long new_state)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+	int ret;
+
+	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
+		return -EIO;
+
+	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+		return -EBUSY;
+
+	mutex_lock(&mvm->mutex);
+
+	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+				   iwl_mvm_cdev_budgets[new_state]);
+
+unlock:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static struct thermal_cooling_device_ops tcooling_ops = {
+	.get_max_state = iwl_mvm_tcool_get_max_state,
+	.get_cur_state = iwl_mvm_tcool_get_cur_state,
+	.set_cur_state = iwl_mvm_tcool_set_cur_state,
+};
+
+int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
+{
+	char name[] = "iwlwifi";
+
+	if (!iwl_mvm_is_ctdp_supported(mvm)) {
+		mvm->cooling_dev.cdev = NULL;
+
+		return 0;
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+	mvm->cooling_dev.cdev =
+		thermal_cooling_device_register(name,
+						mvm,
+						&tcooling_ops);
+
+	if (IS_ERR(mvm->cooling_dev.cdev)) {
+		IWL_DEBUG_TEMP(mvm,
+			       "Failed to register to cooling device (err = %ld)\n",
+			       PTR_ERR(mvm->cooling_dev.cdev));
+		return PTR_ERR(mvm->cooling_dev.cdev);
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
+{
+	if (!iwl_mvm_is_tt_in_fw(mvm))
+		return;
+
+	if (mvm->tz_device.tzone) {
+		IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
+		thermal_zone_device_unregister(mvm->tz_device.tzone);
+		mvm->tz_device.tzone = NULL;
+	}
+}
+
+static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
+{
+	if (!iwl_mvm_is_ctdp_supported(mvm))
+		return;
+
+	if (mvm->cooling_dev.cdev) {
+		IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
+		thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+		mvm->cooling_dev.cdev = NULL;
+	}
+}
+#endif /* CONFIG_THERMAL */
+
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
 {
 	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;

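iwl_mvm_send_temp_report_ths_cmd() above compresses out unset trip slots and sorts the remainder before handing them to the firmware, then maps firmware indexes back to the original trips. The compress-and-sort step in isolation (a standalone sketch; S16_MIN modeled with SHRT_MIN):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_TRIPS 8
#define TRIP_UNSET SHRT_MIN	/* like S16_MIN marking an unused slot */

static int cmp_s16(const void *a, const void *b)
{
	return *(const short *)a - *(const short *)b;
}

int main(void)
{
	short trips[MAX_TRIPS] = { 85, TRIP_UNSET, 60, TRIP_UNSET, 110,
				   TRIP_UNSET, TRIP_UNSET, TRIP_UNSET };
	short fw[MAX_TRIPS];
	int idx = 0;

	/* compress: drop unset slots, then sort what remains; this is
	 * the shape a thresholds command payload would want */
	for (int i = 0; i < MAX_TRIPS; i++)
		if (trips[i] != TRIP_UNSET)
			fw[idx++] = trips[i];
	qsort(fw, idx, sizeof(fw[0]), cmp_s16);

	for (int i = 0; i < idx; i++)
		printf("fw trip %d: %d C\n", i, fw[i]);	/* 60, 85, 110 */
	return 0;
}
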
@@ -450,10 +879,20 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
 	tt->dynamic_smps = false;
 	tt->min_backoff = min_backoff;
 	INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+	iwl_mvm_cooling_device_register(mvm);
+	iwl_mvm_thermal_zone_register(mvm);
+#endif
 }

-void iwl_mvm_tt_exit(struct iwl_mvm *mvm)
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
 {
 	cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
 	IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
+
+#ifdef CONFIG_THERMAL
+	iwl_mvm_cooling_device_unregister(mvm);
+	iwl_mvm_thermal_zone_unregister(mvm);
+#endif
 }
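The cooling device maps each cooling state to a fixed power budget from iwl_mvm_cdev_budgets[], with set_cur_state rejecting states past the end of the table. A sketch of that lookup, with the same budget values copied from the table above:

#include <stdio.h>

/* monotonically decreasing power budgets (mW): a higher cooling state
 * means a tighter budget, mirroring iwl_mvm_cdev_budgets[] */
static const unsigned budgets[] = {
	2000, 1800, 1600, 1400, 1200, 1000, 900, 800, 700, 650,
	600, 550, 500, 450, 400, 350, 300, 250, 200, 150,
};

int main(void)
{
	unsigned long max_state = sizeof(budgets) / sizeof(budgets[0]) - 1;
	unsigned long new_state = 5;

	if (new_state > max_state)
		return 1;	/* set_cur_state would return -EINVAL */
	printf("state %lu -> budget %u mW\n", new_state, budgets[new_state]);
	return 0;
}
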
@@ -65,6 +65,7 @@
 #include <linux/ieee80211.h>
 #include <linux/etherdevice.h>
 #include <linux/tcp.h>
+#include <net/ip.h>

 #include "iwl-trans.h"
 #include "iwl-eeprom-parse.h"
@@ -182,7 +183,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,

 	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
 	/* Total # bytes to be transmitted */
-	tx_cmd->len = cpu_to_le16((u16)skb->len);
+	tx_cmd->len = cpu_to_le16((u16)skb->len +
+		(uintptr_t)info->driver_data[0]);
 	tx_cmd->next_frame_len = 0;
 	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 	tx_cmd->sta_id = sta_id;
@@ -372,6 +374,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		      info->hw_queue != info->control.vif->cab_queue)))
 		return -1;

+	/* This holds the amsdu headers length */
+	info->driver_data[0] = (void *)(uintptr_t)0;
+
 	/*
 	 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
 	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
@@ -425,36 +430,206 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 		return -1;
 	}

 	/*
 	 * Increase the pending frames counter, so that later when a reply comes
 	 * in and the counter is decreased - we don't start getting negative
 	 * values.
 	 * Note that we don't need to make sure it isn't agg'd, since we're
 	 * TXing non-sta
 	 */
 	atomic_inc(&mvm->pending_frames[sta_id]);

 	return 0;
 }

-static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb_gso,
+#ifdef CONFIG_INET
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 			  struct ieee80211_sta *sta,
 			  struct sk_buff_head *mpdus_skb)
 {
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
 	struct sk_buff *tmp, *next;
-	char cb[sizeof(skb_gso->cb)];
+	char cb[sizeof(skb->cb)];
+	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
+	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+	u16 amsdu_add, snap_ip_tcp, pad, i = 0;
+	unsigned int dbg_max_amsdu_len;
+	u8 *qc, tid, txf;

-	memcpy(cb, skb_gso->cb, sizeof(cb));
-	next = skb_gso_segment(skb_gso, 0);
-	if (IS_ERR(next))
+	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
+		tcp_hdrlen(skb);
+
+	qc = ieee80211_get_qos_ctl(hdr);
+	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
 		return -EINVAL;
+
+	if (!sta->max_amsdu_len ||
+	    !ieee80211_is_data_qos(hdr->frame_control) ||
+	    !mvmsta->tlc_amsdu) {
+		num_subframes = 1;
+		pad = 0;
+		goto segment;
+	}
+
+	/*
+	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
+	 * during an BA session.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
+		num_subframes = 1;
+		pad = 0;
+		goto segment;
+	}
+
+	max_amsdu_len = sta->max_amsdu_len;
+	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+
+	/* the Tx FIFO to which this A-MSDU will be routed */
+	txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+	/*
+	 * Don't send an AMSDU that will be longer than the TXF.
+	 * Add a security margin of 256 for the TX command + headers.
+	 * We also want to have the start of the next packet inside the
+	 * fifo to be able to send bursts.
+	 */
+	max_amsdu_len = min_t(unsigned int, max_amsdu_len,
+			      mvm->shared_mem_cfg.txfifo_size[txf] - 256);
+
+	if (dbg_max_amsdu_len)
+		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
+				      dbg_max_amsdu_len);
+
+	/*
+	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
+	 * supported. This is a spec requirement (IEEE 802.11-2015
+	 * section 8.7.3 NOTE 3).
+	 */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+	    !sta->vht_cap.vht_supported)
+		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
+
+	/* Sub frame header + SNAP + IP header + TCP header + MSS */
+	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+	pad = (4 - subf_len) & 0x3;
+
+	/*
+	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
+	 * N * subf_len + (N - 1) * pad.
+	 */
+	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
+	if (num_subframes > 1)
+		*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+		tcp_hdrlen(skb) + skb->data_len;
+
+	/*
+	 * Make sure we have enough TBs for the A-MSDU:
+	 *	2 for each subframe
+	 *	1 more for each fragment
+	 *	1 more for the potential data in the header
+	 */
+	num_subframes =
+		min_t(unsigned int, num_subframes,
+		      (mvm->trans->max_skb_frags - 1 -
+		       skb_shinfo(skb)->nr_frags) / 2);
+
+	/* This skb fits in one single A-MSDU */
+	if (num_subframes * mss >= tcp_payload_len) {
+		/*
+		 * Compute the length of all the data added for the A-MSDU.
+		 * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n -1 subframes and
+		 * ETH header for n subframes. Note that the original skb
+		 * already had one set of SNAP / IP / TCP headers.
+		 */
+		num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
+		info = IEEE80211_SKB_CB(skb);
+		amsdu_add = num_subframes * sizeof(struct ethhdr) +
+			(num_subframes - 1) * (snap_ip_tcp + pad);
+		/* This holds the amsdu headers length */
+		info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+
+		__skb_queue_tail(mpdus_skb, skb);
+		return 0;
+	}
+
+	/*
+	 * Trick the segmentation function to make it
+	 * create SKBs that can fit into one A-MSDU.
+	 */
+segment:
+	skb_shinfo(skb)->gso_size = num_subframes * mss;
+	memcpy(cb, skb->cb, sizeof(cb));
+
+	next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+	skb_shinfo(skb)->gso_size = mss;
+	if (WARN_ON_ONCE(IS_ERR(next)))
+		return -EINVAL;
 	else if (next)
-		consume_skb(skb_gso);
+		consume_skb(skb);

 	while (next) {
 		tmp = next;
 		next = tmp->next;

 		memcpy(tmp->cb, cb, sizeof(tmp->cb));
+		/*
+		 * Compute the length of all the data added for the A-MSDU.
+		 * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n -1 subframes and
+		 * ETH header for n subframes.
+		 */
+		tcp_payload_len = skb_tail_pointer(tmp) -
+			skb_transport_header(tmp) -
+			tcp_hdrlen(tmp) + tmp->data_len;
+
+		if (ipv4)
+			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+		if (tcp_payload_len > mss) {
+			num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
+			info = IEEE80211_SKB_CB(tmp);
+			amsdu_add = num_subframes * sizeof(struct ethhdr) +
+				(num_subframes - 1) * (snap_ip_tcp + pad);
+			info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+			skb_shinfo(tmp)->gso_size = mss;
+		} else {
+			qc = ieee80211_get_qos_ctl((void *)tmp->data);
+
+			if (ipv4)
+				ip_send_check(ip_hdr(tmp));
+			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+			skb_shinfo(tmp)->gso_size = 0;
+		}

 		tmp->prev = NULL;
 		tmp->next = NULL;

 		__skb_queue_tail(mpdus_skb, tmp);
+		i++;
 	}

 	return 0;
 }
+#else /* CONFIG_INET */
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff_head *mpdus_skb)
+{
+	/* Impossible to get TSO with CONFIG_INET */
+	WARN_ON(1);
+
+	return -1;
+}
+#endif

 /*
  * Sets the fields in the Tx cmd that are crypto related
@@ -560,6 +735,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 		   struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct sk_buff_head mpdus_skbs;
 	unsigned int payload_len;
 	int ret;
@@ -570,6 +746,9 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
 		return -1;

+	/* This holds the amsdu headers length */
+	info->driver_data[0] = (void *)(uintptr_t)0;
+
 	if (!skb_is_gso(skb))
 		return iwl_mvm_tx_mpdu(mvm, skb, sta);

@@ -589,7 +768,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 		return ret;

 	while (!skb_queue_empty(&mpdus_skbs)) {
-		struct sk_buff *skb = __skb_dequeue(&mpdus_skbs);
+		skb = __skb_dequeue(&mpdus_skbs);

 		ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
 		if (ret) {
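iwl_mvm_tx_tso() sizes an A-MSDU as N * subf_len + (N - 1) * pad, where each subframe adds an Ethernet-style header plus SNAP/IP/TCP around one MSS of payload. Working that arithmetic once with plausible numbers (the values below are only an example, not taken from the driver):

#include <stdio.h>

/* subframes are padded to a 4-byte boundary except the last one, so an
 * A-MSDU of N subframes occupies N * subf_len + (N - 1) * pad bytes */
int main(void)
{
	unsigned mss = 1460, snap_ip_tcp = 8 + 20 + 20, eth = 14;
	unsigned max_amsdu_len = 7935;	/* HT maximum, for illustration */

	unsigned subf_len = eth + snap_ip_tcp + mss;
	unsigned pad = (4 - subf_len) & 0x3;
	unsigned num_subframes = (max_amsdu_len + pad) / (subf_len + pad);

	printf("subf_len=%u pad=%u -> %u subframes per A-MSDU\n",
	       subf_len, pad, num_subframes);
	return 0;
}
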
@@ -811,6 +811,45 @@ static int iwl_pci_runtime_resume(struct device *device)

 	return 0;
 }

+static int iwl_pci_system_prepare(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+	IWL_DEBUG_RPM(trans, "preparing for system suspend\n");
+
+	/* This is called before entering system suspend and before
+	 * the runtime resume is called. Set the suspending flag to
+	 * prevent the wakelock from being taken.
+	 */
+	trans->suspending = true;
+
+	/* Wake the device up from runtime suspend before going to
+	 * platform suspend. This is needed because we don't know
+	 * whether any wowlan is set and, if it's not, mac80211 will
+	 * disconnect (in which case, we can't be in D0i3).
+	 */
+	pm_runtime_resume(device);
+
+	return 0;
+}
+
+static void iwl_pci_system_complete(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+	IWL_DEBUG_RPM(trans, "completing system suspend\n");
+
+	/* This is called as a counterpart to the prepare op. It is
+	 * called either when suspending fails or when suspend
+	 * completed successfully. Now there's no risk of grabbing
+	 * the wakelock anymore, so we can release the suspending
+	 * flag.
+	 */
+	trans->suspending = false;
+}
 #endif /* CONFIG_IWLWIFI_PCIE_RTPM */

 static const struct dev_pm_ops iwl_dev_pm_ops = {
@@ -820,6 +859,8 @@ static const struct dev_pm_ops iwl_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
 			   iwl_pci_runtime_resume,
 			   NULL)
+	.prepare = iwl_pci_system_prepare,
+	.complete = iwl_pci_system_complete,
 #endif /* CONFIG_IWLWIFI_PCIE_RTPM */
 };
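The prepare/complete pair added to drv.c brackets system suspend: prepare raises a "suspending" flag and forces a runtime resume, complete drops the flag whether suspend succeeded or failed. The shape of that bracket, reduced to a flag in a standalone sketch (names invented, the runtime resume replaced by a print):

#include <stdbool.h>
#include <stdio.h>

static bool suspending;

/* runs before system suspend: raise the flag so the runtime-PM path
 * won't grab a wakelock, then force a runtime resume so the device
 * never enters platform suspend from D0i3 */
static int system_prepare(void)
{
	suspending = true;
	printf("runtime resume forced, suspending=%d\n", suspending);
	return 0;
}

/* runs after suspend succeeded or failed: safe to drop the flag */
static void system_complete(void)
{
	suspending = false;
	printf("suspend bracket closed, suspending=%d\n", suspending);
}

int main(void)
{
	system_prepare();
	/* ... platform suspend/resume would happen here ... */
	system_complete();
	return 0;
}
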
@ -336,6 +336,14 @@ struct iwl_tso_hdr_page {
|
|||
* @fw_mon_phys: physical address of the buffer for the firmware monitor
|
||||
* @fw_mon_page: points to the first page of the buffer for the firmware monitor
|
||||
* @fw_mon_size: size of the buffer for the firmware monitor
|
||||
* @msix_entries: array of MSI-X entries
|
||||
* @msix_enabled: true if managed to enable MSI-X
|
||||
* @allocated_vector: the number of interrupt vector allocated by the OS
|
||||
* @default_irq_num: default irq for non rx interrupt
|
||||
* @fh_init_mask: initial unmasked fh causes
|
||||
* @hw_init_mask: initial unmasked hw causes
|
||||
* @fh_mask: current unmasked fh causes
|
||||
* @hw_mask: current unmasked hw causes
|
||||
*/
|
||||
struct iwl_trans_pcie {
|
||||
struct iwl_rxq *rxq;
|
||||
|
@ -402,6 +410,15 @@ struct iwl_trans_pcie {
|
|||
dma_addr_t fw_mon_phys;
|
||||
struct page *fw_mon_page;
|
||||
u32 fw_mon_size;
|
||||
|
||||
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
|
||||
bool msix_enabled;
|
||||
u32 allocated_vector;
|
||||
u32 default_irq_num;
|
||||
u32 fh_init_mask;
|
||||
u32 hw_init_mask;
|
||||
u32 fh_mask;
|
||||
u32 hw_mask;
|
||||
};
|
||||
|
||||
static inline struct iwl_trans_pcie *
|
||||
|
@ -430,7 +447,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
|
|||
* RX
|
||||
******************************************************/
|
||||
int iwl_pcie_rx_init(struct iwl_trans *trans);
|
||||
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
|
||||
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
|
||||
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
|
||||
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
|
||||
int iwl_pcie_rx_stop(struct iwl_trans *trans);
|
||||
void iwl_pcie_rx_free(struct iwl_trans *trans);
|
||||
|
||||
|
@ -485,15 +505,24 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
|
|||
******************************************************/
|
||||
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
clear_bit(STATUS_INT_ENABLED, &trans->status);
|
||||
if (!trans_pcie->msix_enabled) {
|
||||
/* disable interrupts from uCode/NIC to host */
|
||||
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* disable interrupts from uCode/NIC to host */
|
||||
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* acknowledge/clear/reset any interrupts still pending
|
||||
* from uCode or flow handler (Rx/Tx DMA) */
|
||||
iwl_write32(trans, CSR_INT, 0xffffffff);
|
||||
iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
|
||||
/* acknowledge/clear/reset any interrupts still pending
|
||||
* from uCode or flow handler (Rx/Tx DMA) */
|
||||
iwl_write32(trans, CSR_INT, 0xffffffff);
|
||||
iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
|
||||
} else {
|
||||
/* disable all the interrupt we might use */
|
||||
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
|
||||
trans_pcie->fh_init_mask);
|
||||
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
|
||||
trans_pcie->hw_init_mask);
|
||||
}
|
||||
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
|
||||
}
|
||||
|
||||
|
@@ -503,8 +532,37 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)

 	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
 	set_bit(STATUS_INT_ENABLED, &trans->status);
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	if (!trans_pcie->msix_enabled) {
+		trans_pcie->inta_mask = CSR_INI_SET_MASK;
+		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	} else {
+		/*
+		 * fh/hw_mask keep all the unmasked causes.
+		 * Unlike MSI, in MSI-X a cause is enabled while its mask
+		 * bit is unset.
+		 */
+		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+			    ~trans_pcie->fh_mask);
+		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+			    ~trans_pcie->hw_mask);
+	}
+}
+
+static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
+	trans_pcie->hw_mask = msk;
+}
+
+static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
+	trans_pcie->fh_mask = msk;
+}
+
 static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
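The mask polarity here is easy to misread: with MSI-X a cause can fire only while its bit in the mask register is clear, so the driver caches the unmasked set (fh_mask/hw_mask) and writes the complement to hardware. A self-contained user-space model of that bookkeeping; the register variable and cause bits below are invented for illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_reg;	/* models CSR_MSIX_HW_INT_MASK_AD */
static uint32_t hw_mask;	/* models trans_pcie->hw_mask */

static void enable_causes(uint32_t msk)
{
	hw_reg = ~msk;		/* unset bits = enabled causes */
	hw_mask = msk;		/* remember what is unmasked */
}

static int cause_enabled(uint32_t cause)
{
	return !(hw_reg & cause);	/* enabled while its bit is 0 */
}

int main(void)
{
	enable_causes(1u << 7);		/* e.g. only an rf-kill cause */
	printf("rfkill enabled: %d\n", cause_enabled(1u << 7));	/* 1 */
	printf("alive enabled:  %d\n", cause_enabled(1u << 0));	/* 0 */
	return 0;
}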
@@ -512,8 +570,15 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
-	trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
-	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	if (!trans_pcie->msix_enabled) {
+		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
+		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	} else {
+		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+			    trans_pcie->hw_init_mask);
+		iwl_enable_fh_int_msk_msix(trans,
+					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
+	}
 }

 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
@@ -521,8 +586,15 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
-	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
-	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	if (!trans_pcie->msix_enabled) {
+		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	} else {
+		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+			    trans_pcie->fh_init_mask);
+		iwl_enable_hw_int_msk_msix(trans,
+					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
+	}
 }

 static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -783,16 +783,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 	 * Single frame mode
 	 * Rx buffer size 4 or 8k or 12k
 	 * Min RB size 4 or 8
+	 * Drop frames that exceed RB size
 	 * 512 RBDs
 	 */
 	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
 		       RFH_DMA_EN_ENABLE_VAL |
 		       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
 		       RFH_RXF_DMA_MIN_RB_4_8 |
+		       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
 		       RFH_RXF_DMA_RBDCB_SIZE_512);

 	/*
 	 * Activate DMA snooping.
+	 * Set RX DMA chunk size to 128 bit
 	 * Default queue is 0
 	 */
 	iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
-		       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+		       RFH_GEN_CFG_RB_CHUNK_SIZE |
+		       (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+		       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+	/* Enable the relevant rx queues */
+	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);

 	/* Set interrupt coalescing timer to default (2048 usecs) */
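Both iwl_write_prph() calls above fold several independent settings into one 32-bit configuration word. A toy sketch of that flag-composition style; every bit value below is invented for illustration, the real definitions live in the driver's register headers:

#include <stdint.h>
#include <stdio.h>

#define DMA_EN		(1u << 31)	/* hypothetical bit positions */
#define SINGLE_FRAME	(1u << 29)
#define DROP_TOO_LARGE	(1u << 26)
#define MIN_RB_4_8	(3u << 24)
#define RBDCB_SIZE_512	(9u << 20)

int main(void)
{
	uint32_t rb_size = 4u << 16;	/* stand-in for the 4k encoding */
	uint32_t cfg = DMA_EN | rb_size | SINGLE_FRAME |
		       MIN_RB_4_8 | DROP_TOO_LARGE | RBDCB_SIZE_512;

	printf("RFH_RXF_DMA_CFG <- 0x%08x\n", cfg);	/* one write, many fields */
	return 0;
}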
@@ -1135,10 +1145,10 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 /*
  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
-static void iwl_pcie_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
 	u32 r, i, j, count = 0;
 	bool emergency = false;
@@ -1149,9 +1159,12 @@ restart:
 	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
 	i = rxq->read;

+	/* W/A 9000 device step A0 wrap-around bug */
+	r &= (rxq->queue_size - 1);
+
 	/* Rx interrupt, but nothing sent from uCode */
 	if (i == r)
-		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
+		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;
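The wrap-around workaround only behaves because queue_size is a power of two, so `value & (queue_size - 1)` is a cheap modulo. A standalone model of the masked ring arithmetic (sizes and indices invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t queue_size = 512;	/* must be a power of two */
	uint32_t r = 513;			/* raw index from hardware */

	/* same fix as above: mask instead of modulo */
	r &= queue_size - 1;
	printf("wrapped index: %u\n", r);	/* 1 */

	/* the walk advances with the same mask */
	for (uint32_t i = 510; i != r; i = (i + 1) & (queue_size - 1))
		printf("handle slot %u\n", i);	/* 510, 511, 0 */
	return 0;
}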
@@ -1164,15 +1177,18 @@ restart:
 			 * used_bd is a 32 bit but only 12 are used to retrieve
 			 * the vid
 			 */
-			u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]);
+			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;

+			if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
+				 "Invalid rxb index from HW %u\n", (u32)vid))
+				goto out;
 			rxb = trans_pcie->global_table[vid];
 		} else {
 			rxb = rxq->queue[i];
 			rxq->queue[i] = NULL;
 		}

-		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
+		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);

 		i = (i + 1) & (rxq->queue_size - 1);
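Only the low 12 bits of used_bd carry the vid, and the hardware-supplied index is bounds-checked before it is used as a table lookup. A minimal standalone version of that extract-and-validate step; the table size and descriptor value are invented:

#include <stdint.h>
#include <stdio.h>

#define GLOBAL_TABLE_SIZE 512	/* stand-in for the real table size */

int main(void)
{
	uint32_t used_bd = 0xABCD0123;	/* fake descriptor word from hw */
	uint16_t vid = used_bd & 0x0FFF;/* keep only the 12 vid bits */

	if (vid >= GLOBAL_TABLE_SIZE) {	/* never trust a hw index */
		fprintf(stderr, "invalid rxb index %u\n", vid);
		return 1;
	}
	printf("vid %u -> global_table lookup ok\n", vid);	/* 291 */
	return 0;
}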
@@ -1235,7 +1251,7 @@ restart:
 			goto restart;
 		}
 	}
-
+out:
 	/* Backtrack one entry */
 	rxq->read = i;
 	spin_unlock(&rxq->lock);
@@ -1259,6 +1275,54 @@ restart:
 		napi_gro_flush(&rxq->napi, false);
 }

+static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
+{
+	u8 queue = entry->entry;
+	struct msix_entry *entries = entry - queue;
+
+	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
+}
+
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+				      struct msix_entry *entry)
+{
+	/*
+	 * Before sending the interrupt the HW disables it to prevent
+	 * a nested interrupt. This is done by writing 1 to the
+	 * corresponding bit in the mask register. After handling the
+	 * interrupt, it should be re-enabled by clearing this bit.
+	 * This register is defined as write 1 clear (W1C), meaning that
+	 * it is cleared by writing 1 to the bit.
+	 */
+	iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
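iwl_pcie_get_trans_pcie() above steps back from one msix_entry to element 0 of the embedded array, then recovers the enclosing struct. A user-space model of that pointer arithmetic, with invented struct names and a local container_of definition:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entry { int entry; int vector; };

struct transport {
	const char *name;
	struct entry entries[4];	/* models msix_entries[] */
};

static struct transport *entry_to_transport(struct entry *e)
{
	struct entry *first = e - e->entry;	/* back to element 0 */

	return container_of(first, struct transport, entries);
}

int main(void)
{
	struct transport t = { "trans0",
		{ {0, 10}, {1, 11}, {2, 12}, {3, 13} } };

	printf("%s\n", entry_to_transport(&t.entries[2])->name);	/* trans0 */
	return 0;
}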
+/*
+ * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving
+ * responses from fw. This interrupt handler should be used with RSS
+ * queues only.
+ */
+irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+{
+	struct msix_entry *entry = dev_id;
+	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+	struct iwl_trans *trans = trans_pcie->trans;
+
+	if (WARN_ON(entry->entry >= trans->num_rx_queues))
+		return IRQ_NONE;
+
+	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+	local_bh_disable();
+	iwl_pcie_rx_handle(trans, entry->entry);
+	local_bh_enable();
+
+	iwl_pcie_clear_irq(trans, entry);
+
+	lock_map_release(&trans->sync_cmd_lockdep_map);
+
+	return IRQ_HANDLED;
+}

 /*
  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
  */
@@ -1589,7 +1653,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 		isr_stats->rx++;

 		local_bh_disable();
-		iwl_pcie_rx_handle(trans);
+		iwl_pcie_rx_handle(trans, 0);
 		local_bh_enable();
 	}
@@ -1732,3 +1796,129 @@ irqreturn_t iwl_pcie_isr(int irq, void *data)

 	return IRQ_WAKE_THREAD;
 }
+
+irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+{
+	struct msix_entry *entry = dev_id;
+	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+	struct iwl_trans *trans = trans_pcie->trans;
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+	u32 inta_fh, inta_hw;
+
+	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+	spin_lock(&trans_pcie->irq_lock);
+	inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+	inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+	/*
+	 * Clear the cause registers to avoid handling the same cause
+	 * again on the next interrupt.
+	 */
+	iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+	iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+	spin_unlock(&trans_pcie->irq_lock);
+
+	if (unlikely(!(inta_fh | inta_hw))) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		lock_map_release(&trans->sync_cmd_lockdep_map);
+		return IRQ_NONE;
+	}
+
+	if (iwl_have_debug_level(IWL_DL_ISR))
+		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
+			      inta_fh,
+			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

+	/* This "Tx" DMA channel is used only for loading uCode */
+	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+		isr_stats->tx++;
+		/*
+		 * Wake up the uCode load routine,
+		 * now that the load is complete
+		 */
+		trans_pcie->ucode_write_complete = true;
+		wake_up(&trans_pcie->ucode_write_waitq);
+	}
+
+	/* Error detected by uCode */
+	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
+	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+		IWL_ERR(trans,
+			"Microcode SW error detected. Restarting 0x%X.\n",
+			inta_fh);
+		isr_stats->sw++;
+		iwl_pcie_irq_handle_error(trans);
+	}
+
+	/* After checking FH register check HW register */
+	if (iwl_have_debug_level(IWL_DL_ISR))
+		IWL_DEBUG_ISR(trans,
+			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
+			      inta_hw,
+			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+
+	/* Alive notification via Rx interrupt will do the real work */
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
+		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+		isr_stats->alive++;
+	}
+
+	/* uCode wakes up after power-down sleep */
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+		iwl_pcie_rxq_check_wrptr(trans);
+		iwl_pcie_txq_check_wrptrs(trans);
+
+		isr_stats->wakeup++;
+	}
+
+	/* Chip got too hot and stopped itself */
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
+		IWL_ERR(trans, "Microcode CT kill error detected.\n");
+		isr_stats->ctkill++;
+	}
+
+	/* HW RF KILL switch toggled */
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
+		bool hw_rfkill;
+
+		hw_rfkill = iwl_is_rfkill_set(trans);
+		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+			 hw_rfkill ? "disable radio" : "enable radio");
+
+		isr_stats->rfkill++;
+
+		mutex_lock(&trans_pcie->mutex);
+		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+		mutex_unlock(&trans_pcie->mutex);
+		if (hw_rfkill) {
+			set_bit(STATUS_RFKILL, &trans->status);
+			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+					       &trans->status))
+				IWL_DEBUG_RF_KILL(trans,
+						  "Rfkill while SYNC HCMD in flight\n");
+			wake_up(&trans_pcie->wait_command_queue);
+		} else {
+			clear_bit(STATUS_RFKILL, &trans->status);
+		}
+	}
+
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+		IWL_ERR(trans,
+			"Hardware error detected. Restarting.\n");
+
+		isr_stats->hw++;
+		iwl_pcie_irq_handle_error(trans);
+	}
+
+	iwl_pcie_clear_irq(trans, entry);
+
+	lock_map_release(&trans->sync_cmd_lockdep_map);
+
+	return IRQ_HANDLED;
+}
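The handler above follows a snapshot, write-1-to-clear, dispatch pattern: read the cause registers, ack exactly the causes that were read, then test each bit. A compact model with fake register storage and invented cause bits:

#include <stdint.h>
#include <stdio.h>

#define CAUSE_ALIVE	(1u << 0)
#define CAUSE_WAKEUP	(1u << 1)
#define CAUSE_RFKILL	(1u << 7)

static uint32_t cause_reg = CAUSE_ALIVE | CAUSE_RFKILL;	/* fake hw */

int main(void)
{
	uint32_t inta = cause_reg;	/* snapshot the cause register */
	cause_reg &= ~inta;		/* W1C ack: avoid re-handling  */

	if (!inta) {
		puts("spurious interrupt");
		return 0;
	}
	if (inta & CAUSE_ALIVE)
		puts("alive notification");
	if (inta & CAUSE_WAKEUP)
		puts("wakeup");
	if (inta & CAUSE_RFKILL)
		puts("rf-kill toggled");
	return 0;
}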
@@ -616,38 +616,38 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
 				    dma_addr_t phy_addr, u32 byte_cnt)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
 	int ret;

 	trans_pcie->ucode_write_complete = false;

-	iwl_write_direct32(trans,
-			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return -EIO;

-	iwl_write_direct32(trans,
-			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
-			   dst_addr);
+	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

-	iwl_write_direct32(trans,
-			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
-			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+		    dst_addr);

-	iwl_write_direct32(trans,
-			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
-			   (iwl_get_dma_hi_addr(phy_addr)
-				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

-	iwl_write_direct32(trans,
-			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
-			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
-			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
-			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+		    (iwl_get_dma_hi_addr(phy_addr)
+			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

-	iwl_write_direct32(trans,
-			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
-			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
+		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
+		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+	iwl_trans_release_nic_access(trans, &flags);

 	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
 				 trans_pcie->ucode_write_complete, 5 * HZ);
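The function arms a completion flag, programs the DMA channel, then sleeps with a timeout until the load interrupt sets the flag. The same shape modeled with POSIX threads; the five-second deadline mirrors the 5 * HZ above and everything else is a stand-in, not kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool write_complete;		/* models ucode_write_complete */

static void *irq_thread(void *arg)	/* models the uCode load interrupt */
{
	pthread_mutex_lock(&lock);
	write_complete = true;
	pthread_cond_signal(&cond);	/* models wake_up(&...waitq) */
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t tid;
	struct timespec deadline;
	int err = 0;

	pthread_create(&tid, NULL, irq_thread, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;		/* models the 5 * HZ timeout */

	pthread_mutex_lock(&lock);
	while (!write_complete && err == 0)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	puts(write_complete ? "chunk loaded" : "timed out");
	pthread_join(tid, NULL);
	return 0;
}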
@@ -1123,6 +1123,20 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 	iwl_pcie_prepare_card_hw(trans);
 }

+static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans_pcie->msix_enabled) {
+		int i;
+
+		for (i = 0; i < trans_pcie->allocated_vector; i++)
+			synchronize_irq(trans_pcie->msix_entries[i].vector);
+	} else {
+		synchronize_irq(trans_pcie->pci_dev->irq);
+	}
+}
+
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 				   const struct fw_img *fw, bool run_in_rfkill)
 {
@@ -1149,7 +1163,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	iwl_disable_interrupts(trans);

 	/* Make sure it finished running */
-	synchronize_irq(trans_pcie->pci_dev->irq);
+	iwl_pcie_synchronize_irqs(trans);

 	mutex_lock(&trans_pcie->mutex);
@@ -1252,8 +1266,6 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
 				      bool reset)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
 	if (!reset) {
 		/* Enable persistence mode to avoid reset */
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,

@@ -1271,7 +1283,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,

 	iwl_pcie_disable_ict(trans);

-	synchronize_irq(trans_pcie->pci_dev->irq);
+	iwl_pcie_synchronize_irqs(trans);

 	iwl_clear_bit(trans, CSR_GP_CNTRL,
 		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1350,6 +1362,153 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 	return 0;
 }

+struct iwl_causes_list {
+	u32 cause_num;
+	u32 mask_reg;
+	u8 addr;
+};
+
+static struct iwl_causes_list causes_list[] = {
+	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
+	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
+	{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
+	{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
+	{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
+	{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+	{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
+	{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
+	{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+	{MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+	{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+	{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+	{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+	{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
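The table above pairs each interrupt cause with the mask register guarding it and a byte offset into the IVAR table; the init code below walks it once, routing every non-rx cause to the default vector and unmasking it. A toy walk over an invented two-entry table:

#include <stdint.h>
#include <stdio.h>

struct cause { uint32_t bit; uint8_t ivar_addr; };

/* invented subset of the real table */
static const struct cause causes[] = {
	{ 1u << 0, 0x10 },	/* alive   */
	{ 1u << 7, 0x17 },	/* rf-kill */
};

static uint8_t ivar[0x30];	/* fake IVAR byte table  */
static uint32_t mask = ~0u;	/* fake HW mask register */

int main(void)
{
	uint8_t default_vec = 3;	/* models default_irq_num */

	for (unsigned i = 0; i < sizeof(causes) / sizeof(causes[0]); i++) {
		ivar[causes[i].ivar_addr] = default_vec;	/* route cause  */
		mask &= ~causes[i].bit;				/* unmask cause */
	}
	printf("mask now 0x%08x\n", mask);
	return 0;
}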
+static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+{
+	u32 val, max_rx_vector, i;
+	struct iwl_trans *trans = trans_pcie->trans;
+
+	max_rx_vector = trans_pcie->allocated_vector - 1;
+
+	if (!trans_pcie->msix_enabled)
+		return;
+
+	iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+
+	/*
+	 * Each cause from the list above and the RX causes is represented as
+	 * a byte in the IVAR table. We access the first (N - 1) bytes and map
+	 * them to the (N - 1) vectors so these vectors will be used as rx
+	 * vectors. Then access all non rx causes and map them to the
+	 * default queue (N'th queue).
+	 */
+	for (i = 0; i < max_rx_vector; i++) {
+		iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
+		iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
+			      BIT(MSIX_FH_INT_CAUSES_Q(i)));
+	}
+
+	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+		val = trans_pcie->default_irq_num |
+		      MSIX_NON_AUTO_CLEAR_CAUSE;
+		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+		iwl_clear_bit(trans, causes_list[i].mask_reg,
+			      causes_list[i].cause_num);
+	}
+	trans_pcie->fh_init_mask =
+		~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
+	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+	trans_pcie->hw_init_mask =
+		~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
+	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+}
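The vector budget behind this split is min(num_possible_cpus() + 1, IWL_MAX_RX_HW_QUEUES): one vector per CPU for rx plus one shared vector for everything else, which also becomes default_irq_num. A trivial model of that sizing policy; the constant's value here is an assumption:

#include <stdio.h>

#define IWL_MAX_RX_HW_QUEUES 16	/* value is an assumption here */

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned num_cpus = 4;	/* stand-in for num_possible_cpus() */
	unsigned max_vector = min_u(num_cpus + 1, IWL_MAX_RX_HW_QUEUES);

	/* vectors 0..N-2 service rx queues, the last one everything else */
	printf("request %u vectors: %u rx queues + default vector %u\n",
	       max_vector, max_vector - 1, max_vector - 1);
	return 0;
}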
+static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
+					struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u16 pci_cmd;
+	int max_vector;
+	int ret, i;
+
+	if (trans->cfg->mq_rx_supported) {
+		max_vector = min_t(u32, (num_possible_cpus() + 1),
+				   IWL_MAX_RX_HW_QUEUES);
+		for (i = 0; i < max_vector; i++)
+			trans_pcie->msix_entries[i].entry = i;
+
+		ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+					    MSIX_MIN_INTERRUPT_VECTORS,
+					    max_vector);
+		if (ret > 1) {
+			IWL_DEBUG_INFO(trans,
+				       "Enable MSI-X allocate %d interrupt vector\n",
+				       ret);
+			trans_pcie->allocated_vector = ret;
+			trans_pcie->default_irq_num =
+				trans_pcie->allocated_vector - 1;
+			trans_pcie->trans->num_rx_queues =
+				trans_pcie->allocated_vector - 1;
+			trans_pcie->msix_enabled = true;
+
+			return;
+		}
+		IWL_DEBUG_INFO(trans,
+			       "ret = %d %s move to msi mode\n", ret,
+			       (ret == 1) ?
+			       "can't allocate more than 1 interrupt vector" :
+			       "failed to enable msi-x mode");
+		pci_disable_msix(pdev);
+	}
+
+	ret = pci_enable_msi(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+		/* enable rfkill interrupt: hw bug w/a */
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+		}
+	}
+}
+static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
+				      struct iwl_trans_pcie *trans_pcie)
+{
+	int i, last_vector;
+
+	last_vector = trans_pcie->trans->num_rx_queues;
+
+	for (i = 0; i < trans_pcie->allocated_vector; i++) {
+		int ret;
+
+		ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
+					   iwl_pcie_msix_isr,
+					   (i == last_vector) ?
+					   iwl_pcie_irq_msix_handler :
+					   iwl_pcie_irq_rx_msix_handler,
+					   IRQF_SHARED,
+					   DRV_NAME,
+					   &trans_pcie->msix_entries[i]);
+		if (ret) {
+			int j;
+
+			IWL_ERR(trans_pcie->trans,
+				"Error allocating IRQ %d\n", i);
+			for (j = 0; j < i; j++)
+				free_irq(trans_pcie->msix_entries[j].vector,
+					 &trans_pcie->msix_entries[j]);
+			pci_disable_msix(pdev);
+			return ret;
+		}
+	}
+
+	return 0;
+}
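On a mid-loop failure the function frees exactly the vectors requested so far (note that j, not i, must index the entries being freed) before disabling MSI-X. The acquire-then-rollback shape in a standalone sketch, with a fake request that fails on the third vector:

#include <stdio.h>

static int request_vector(int i)	/* pretend vector 2 fails */
{
	return i == 2 ? -1 : 0;
}

static void free_vector(int i)
{
	printf("freed vector %d\n", i);
}

int main(void)
{
	int i, n = 4;

	for (i = 0; i < n; i++) {
		if (request_vector(i)) {
			/* undo exactly the vectors acquired so far */
			for (int j = 0; j < i; j++)
				free_vector(j);
			fprintf(stderr, "request %d failed\n", i);
			return 1;
		}
	}
	return 0;
}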
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

@@ -1371,6 +1530,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)

 	iwl_pcie_apm_init(trans);

+	iwl_pcie_init_msix(trans_pcie);
 	/* From now on, the op_mode will be kept updated about RF kill state */
 	iwl_enable_rfkill_int(trans);
@@ -1425,7 +1585,7 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)

 	mutex_unlock(&trans_pcie->mutex);

-	synchronize_irq(trans_pcie->pci_dev->irq);
+	iwl_pcie_synchronize_irqs(trans);
 }

 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1506,15 +1666,25 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 	/* TODO: check if this is really needed */
 	pm_runtime_disable(trans->dev);

-	synchronize_irq(trans_pcie->pci_dev->irq);
+	iwl_pcie_synchronize_irqs(trans);

 	iwl_pcie_tx_free(trans);
 	iwl_pcie_rx_free(trans);

-	free_irq(trans_pcie->pci_dev->irq, trans);
-	iwl_pcie_free_ict(trans);
-
-	pci_disable_msi(trans_pcie->pci_dev);
+	if (trans_pcie->msix_enabled) {
+		for (i = 0; i < trans_pcie->allocated_vector; i++)
+			free_irq(trans_pcie->msix_entries[i].vector,
+				 &trans_pcie->msix_entries[i]);
+
+		pci_disable_msix(trans_pcie->pci_dev);
+		trans_pcie->msix_enabled = false;
+	} else {
+		free_irq(trans_pcie->pci_dev->irq, trans);
+
+		iwl_pcie_free_ict(trans);
+
+		pci_disable_msi(trans_pcie->pci_dev);
+	}
 	iounmap(trans_pcie->hw_base);
 	pci_release_regions(trans_pcie->pci_dev);
 	pci_disable_device(trans_pcie->pci_dev);
@@ -2069,7 +2239,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 		} else {
 			pos += scnprintf(buf + pos, bufsz - pos,
 					 "\tclosed_rb_num: Not Allocated\n");
 		}
 	}
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
@@ -2615,7 +2785,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
-	u16 pci_cmd;
 	int ret, addr_size;

 	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),

@@ -2698,17 +2867,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->pci_dev = pdev;
 	iwl_disable_interrupts(trans);

-	ret = pci_enable_msi(pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
-		/* enable rfkill interrupt: hw bug w/a */
-		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
-		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
-			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
-			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
-		}
-	}
-
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	/*
 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
@@ -2760,6 +2918,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}

+	iwl_pcie_set_interrupt_capa(pdev, trans);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
 		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
@@ -2769,19 +2928,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,

 	init_waitqueue_head(&trans_pcie->d0i3_waitq);

-	ret = iwl_pcie_alloc_ict(trans);
-	if (ret)
-		goto out_pci_disable_msi;
-
-	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
-				   iwl_pcie_irq_handler,
-				   IRQF_SHARED, DRV_NAME, trans);
-	if (ret) {
-		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
-		goto out_free_ict;
-	}
-
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+	if (trans_pcie->msix_enabled) {
+		if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
+			goto out_pci_release_regions;
+	} else {
+		ret = iwl_pcie_alloc_ict(trans);
+		if (ret)
+			goto out_pci_disable_msi;
+
+		ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+					   iwl_pcie_irq_handler,
+					   IRQF_SHARED, DRV_NAME, trans);
+		if (ret) {
+			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+			goto out_free_ict;
+		}
+		trans_pcie->inta_mask = CSR_INI_SET_MASK;
+	}

 #ifdef CONFIG_IWLWIFI_PCIE_RTPM
 	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
@@ -1062,10 +1062,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,

 	if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
 	    test_bit(txq_id, trans_pcie->queue_stopped)) {
-		struct sk_buff_head skbs;
+		struct sk_buff_head overflow_skbs;

-		__skb_queue_head_init(&skbs);
-		skb_queue_splice_init(&txq->overflow_q, &skbs);
+		__skb_queue_head_init(&overflow_skbs);
+		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

 		/*
 		 * This is tricky: we are in reclaim path which is non
@@ -1076,8 +1076,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		 */
 		spin_unlock_bh(&txq->lock);

-		while (!skb_queue_empty(&skbs)) {
-			struct sk_buff *skb = __skb_dequeue(&skbs);
+		while (!skb_queue_empty(&overflow_skbs)) {
+			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 			u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
 			struct iwl_device_cmd *dev_cmd =