Merge tag 'iwlwifi-next-for-kalle-2017-01-26' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Some improvements, bugfixes and new features:

* A bunch of cleanups here and there;
* A few simple bugfixes;
* Some more work in preparation for A000 family support;
* Add support for radiotap timestamps;
* Some work on our firmware debugging capabilities;
commit 0b9c1ac193
@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
 {
 	struct iwl_priv *priv = file->private_data;
 	bool restart_fw = iwlwifi_mod_params.restart_fw;
-	int ret;
+	int __maybe_unused ret;
 
 	iwlwifi_mod_params.restart_fw = true;
 
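
The hunk above marks ret as __maybe_unused, which keeps the assignment but suppresses set-but-unused warnings in configurations where the value is never read. A minimal sketch of the idiom, assuming the kernel's __maybe_unused expands to the GCC unused attribute as in <linux/compiler.h> (DEMO_DEBUG and do_debug_work() are made-up names for illustration):

#define __maybe_unused __attribute__((unused))

extern int do_debug_work(void);

int demo(void)
{
	int __maybe_unused ret;	/* only read when DEMO_DEBUG is defined */

#ifdef DEMO_DEBUG
	ret = do_debug_work();
	if (ret)
		return ret;
#endif
	return 0;
}
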
@@ -163,7 +163,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 		       REGULATORY_DISABLE_BEACON_HINTS;
 
 #ifdef CONFIG_PM_SLEEP
-	if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+	if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec &&
 	    priv->trans->ops->d3_suspend &&
 	    priv->trans->ops->d3_resume &&
 	    device_can_wakeup(priv->trans->dev)) {
@@ -364,7 +364,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
 /*
 	get the traffic load value for tid
 */
-static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+static void rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
 {
 	u32 curr_time = jiffies_to_msecs(jiffies);
 	u32 time_diff;
@@ -372,14 +372,14 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
 	struct iwl_traffic_load *tl = NULL;
 
 	if (tid >= IWL_MAX_TID_COUNT)
-		return 0;
+		return;
 
 	tl = &(lq_data->load[tid]);
 
 	curr_time -= curr_time % TID_ROUND_VALUE;
 
 	if (!(tl->queue_count))
-		return 0;
+		return;
 
 	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
 	index = time_diff / TID_QUEUE_CELL_SPACING;
@@ -388,8 +388,6 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
 	   TID_MAX_TIME_DIFF */
 	if (index >= TID_QUEUE_MAX_SIZE)
 		rs_tl_rm_old_stats(tl, curr_time);
-
-	return tl->total;
 }
 
 static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
@@ -397,7 +395,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
 				     struct ieee80211_sta *sta)
 {
 	int ret = -EAGAIN;
-	u32 load;
 
 	/*
 	 * Don't create TX aggregation sessions when in high
@@ -410,7 +407,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
 		return ret;
 	}
 
-	load = rs_tl_get_load(lq_data, tid);
+	rs_tl_get_load(lq_data, tid);
 
 	IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
 		     sta->addr, tid);
@@ -407,7 +407,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
 	lockdep_assert_held(&priv->mutex);
 
 	/* No init ucode required? Curious, but maybe ok */
-	if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
+	if (!priv->fw->img[IWL_UCODE_INIT].num_sec)
 		return 0;
 
 	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
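
This hunk, like the WOWLAN checks elsewhere in the series, replaces a sec[0].len test with num_sec: once the section array becomes dynamically allocated, sec[0] may not exist at all, so the element count is the only safe emptiness test. A hedged sketch of the shape (simplified types, not the driver's full definitions):

#include <stddef.h>

struct fw_desc { const void *data; size_t len; };

struct fw_img {
	struct fw_desc *sec;	/* dynamically allocated array */
	int num_sec;		/* number of valid entries in sec[] */
};

/* Safe: never touches sec[0] when the image carries no sections. */
static int img_has_sections(const struct fw_img *img)
{
	return img->num_sec > 0;
}
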
@@ -371,4 +371,4 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
@@ -73,8 +73,8 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX	17
 #define IWL7265_UCODE_API_MAX	17
-#define IWL7265D_UCODE_API_MAX	26
-#define IWL3168_UCODE_API_MAX	26
+#define IWL7265D_UCODE_API_MAX	28
+#define IWL3168_UCODE_API_MAX	28
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN	17
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	26
-#define IWL8265_UCODE_API_MAX	26
+#define IWL8000_UCODE_API_MAX	28
+#define IWL8265_UCODE_API_MAX	28
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN	17
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	26
+#define IWL9000_UCODE_API_MAX	28
 
 /* Lowest firmware API version supported */
 #define IWL9000_UCODE_API_MIN	17
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX	26
+#define IWL_A000_UCODE_API_MAX	28
 
 /* Lowest firmware API version supported */
 #define IWL_A000_UCODE_API_MIN	24
@@ -166,8 +166,9 @@ static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
 static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
 {
 	int i;
-	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++)
+	for (i = 0; i < img->num_sec; i++)
 		iwl_free_fw_desc(drv, &img->sec[i]);
+	kfree(img->sec);
 }
 
 static void iwl_dealloc_ucode(struct iwl_drv *drv)
@@ -179,8 +180,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
 		kfree(drv->fw.dbg_conf_tlv[i]);
 	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
 		kfree(drv->fw.dbg_trigger_tlv[i]);
-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
-		kfree(drv->fw.dbg_mem_tlv[i]);
+	kfree(drv->fw.dbg_mem_tlv);
 
 	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
 		iwl_free_fw_img(drv, drv->fw.img + i);
@@ -241,7 +241,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 }
 
 struct fw_img_parsing {
-	struct fw_sec sec[IWL_UCODE_SECTION_MAX];
+	struct fw_sec *sec;
 	int sec_counter;
 };
 
@@ -276,7 +276,8 @@ struct iwl_firmware_pieces {
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
-	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+	size_t n_dbg_mem_tlv;
 };
 
 /*
@@ -383,6 +384,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
 	struct fw_img_parsing *img;
 	struct fw_sec *sec;
 	struct fw_sec_parsing *sec_parse;
+	size_t alloc_size;
 
 	if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
 		return -1;
@@ -390,6 +392,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
 	sec_parse = (struct fw_sec_parsing *)data;
 
 	img = &pieces->img[type];
+
+	alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
+	sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+	img->sec = sec;
+
 	sec = &img->sec[img->sec_counter];
 
 	sec->offset = le32_to_cpu(sec_parse->offset);
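
iwl_store_ucode_sec() now grows the per-image section array one element per parsed section with krealloc(), which preserves the old contents and leaves the original buffer valid if the allocation fails. The same append pattern in portable C using realloc() (the kernel version additionally passes GFP_KERNEL; the names below are illustrative):

#include <stdlib.h>

struct sec { unsigned int offset; size_t len; };

static int append_sec(struct sec **arr, int *count, struct sec item)
{
	struct sec *tmp = realloc(*arr, sizeof(**arr) * (*count + 1));

	if (!tmp)
		return -1;	/* the driver returns -ENOMEM here */
	*arr = tmp;		/* old entries preserved by realloc */
	tmp[(*count)++] = item;
	return 0;
}
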
@@ -1009,31 +1018,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
 				(void *)tlv_data;
 			u32 type;
+			size_t size;
+			struct iwl_fw_dbg_mem_seg_tlv *n;
 
 			if (tlv_len != (sizeof(*dbg_mem)))
 				goto invalid_tlv_len;
 
 			type = le32_to_cpu(dbg_mem->data_type);
-			drv->fw.dbg_dynamic_mem = true;
 
-			if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
-				IWL_ERR(drv,
-					"Skip unknown dbg mem segment: %u\n",
-					dbg_mem->data_type);
-				break;
-			}
-
-			if (pieces->dbg_mem_tlv[type]) {
-				IWL_ERR(drv,
-					"Ignore duplicate mem segment: %u\n",
-					dbg_mem->data_type);
-				break;
-			}
-
-			IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
-				       dbg_mem->data_type);
-
-			pieces->dbg_mem_tlv[type] = dbg_mem;
+			switch (type & FW_DBG_MEM_TYPE_MASK) {
+			case FW_DBG_MEM_TYPE_REGULAR:
+			case FW_DBG_MEM_TYPE_PRPH:
+				/* we know how to handle these */
+				break;
+			default:
+				IWL_ERR(drv,
+					"Found debug memory segment with invalid type: 0x%x\n",
+					type);
+				return -EINVAL;
+			}
+
+			size = sizeof(*pieces->dbg_mem_tlv) *
+			       (pieces->n_dbg_mem_tlv + 1);
+			n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
+			if (!n)
+				return -ENOMEM;
+			pieces->dbg_mem_tlv = n;
+			pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
+			pieces->n_dbg_mem_tlv++;
 			break;
 		}
 		default:
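
The reworked TLV parser keys off the top byte of data_type: FW_DBG_MEM_TYPE_MASK selects the segment class, so validation becomes a single mask-and-switch instead of a bounds check into a fixed-size array. A small stand-alone sketch of that decode step (constants copied from the hunk above):

#include <stdint.h>

#define FW_DBG_MEM_TYPE_MASK	0xff000000u
#define FW_DBG_MEM_TYPE_REGULAR	0x00000000u
#define FW_DBG_MEM_TYPE_PRPH	0x01000000u

static int mem_seg_type_is_known(uint32_t type)
{
	switch (type & FW_DBG_MEM_TYPE_MASK) {
	case FW_DBG_MEM_TYPE_REGULAR:
	case FW_DBG_MEM_TYPE_PRPH:
		return 1;	/* we know how to handle these */
	default:
		return 0;	/* the parser rejects the TLV with -EINVAL */
	}
}
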
@@ -1083,12 +1098,18 @@ static int iwl_alloc_ucode(struct iwl_drv *drv,
 			   enum iwl_ucode_type type)
 {
 	int i;
-	for (i = 0;
-	     i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
-	     i++)
-		if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
-				      get_sec(pieces, type, i)))
+	struct fw_desc *sec;
+
+	sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+	drv->fw.img[type].sec = sec;
+	drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
+
+	for (i = 0; i < pieces->img[type].sec_counter; i++)
+		if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
 			return -ENOMEM;
 
 	return 0;
 }
 
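
iwl_alloc_ucode() sizes the final array with kcalloc(count, size, GFP_KERNEL), which zero-fills and guards the count*size multiplication against overflow; the zeroing matters because the error path then frees every entry up to num_sec, including ones never filled in. The equivalent portable sketch uses calloc():

#include <stdlib.h>

struct fw_desc { void *data; size_t len; };

/* calloc() checks count * size for overflow and zero-fills, like kcalloc() */
static struct fw_desc *alloc_sections(size_t count)
{
	return calloc(count, sizeof(struct fw_desc));
}
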
@@ -1345,19 +1366,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 		}
 	}
 
-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
-		if (pieces->dbg_mem_tlv[i]) {
-			drv->fw.dbg_mem_tlv[i] =
-				kmemdup(pieces->dbg_mem_tlv[i],
-					sizeof(*drv->fw.dbg_mem_tlv[i]),
-					GFP_KERNEL);
-			if (!drv->fw.dbg_mem_tlv[i])
-				goto out_free_fw;
-		}
-	}
-
 	/* Now that we can no longer fail, copy information */
 
+	drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+	pieces->dbg_mem_tlv = NULL;
+	drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+
 	/*
 	 * The (size - 16) / 12 formula is based on the information recorded
 	 * for each event, which is of mode 1 (including timestamp) for all
@@ -1441,25 +1455,27 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 				op->name, err);
 #endif
 	}
-	kfree(pieces);
-	return;
+	goto free;
 
 try_again:
 	/* try next, if any */
 	release_firmware(ucode_raw);
 	if (iwl_request_firmware(drv, false))
 		goto out_unbind;
-	kfree(pieces);
-	return;
+	goto free;
 
 out_free_fw:
 	IWL_ERR(drv, "failed to allocate pci memory\n");
 	iwl_dealloc_ucode(drv);
 	release_firmware(ucode_raw);
 out_unbind:
-	kfree(pieces);
 	complete(&drv->request_firmware_complete);
 	device_release_driver(drv->trans->dev);
+free:
+	for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+		kfree(pieces->img[i].sec);
+	kfree(pieces->dbg_mem_tlv);
+	kfree(pieces);
 }
 
 struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
@@ -379,7 +379,6 @@ enum iwl_ucode_tlv_capa {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
 #define PAGING_SEPARATOR_SECTION	0xAAAABBBB
 
@@ -489,25 +488,22 @@ enum iwl_fw_dbg_monitor_mode {
 };
 
 /**
- * enum iwl_fw_mem_seg_type - data types for dumping on error
- *
- * @FW_DBG_MEM_SMEM: the data type is SMEM
- * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
- * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ * enum iwl_fw_mem_seg_type - memory segment type
+ * @FW_DBG_MEM_TYPE_MASK: mask for the type indication
+ * @FW_DBG_MEM_TYPE_REGULAR: regular memory
+ * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading)
  */
-enum iwl_fw_dbg_mem_seg_type {
-	FW_DBG_MEM_DCCM_LMAC = 0,
-	FW_DBG_MEM_DCCM_UMAC,
-	FW_DBG_MEM_SMEM,
-
-	/* Must be last */
-	FW_DBG_MEM_MAX,
+enum iwl_fw_mem_seg_type {
+	FW_DBG_MEM_TYPE_MASK = 0xff000000,
+	FW_DBG_MEM_TYPE_REGULAR = 0x00000000,
+	FW_DBG_MEM_TYPE_PRPH = 0x01000000,
 };
 
 /**
  * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
  *
- * @data_type: enum %iwl_fw_mem_seg_type
+ * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type
+ *	for what we care about
  * @ofs: the memory segment offset
  * @len: the memory segment length, in bytes
  *
@@ -132,7 +132,8 @@ struct fw_desc {
 };
 
 struct fw_img {
-	struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+	struct fw_desc *sec;
+	int num_sec;
 	bool is_dual_cpus;
 	u32 paging_mem_size;
 };
@@ -295,8 +296,8 @@ struct iwl_fw {
 	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
-	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
-	bool dbg_dynamic_mem;
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+	size_t n_dbg_mem_tlv;
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
 	struct iwl_gscan_capabilities gscan_capa;
@@ -1262,12 +1262,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 	iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
 out:
 	if (ret < 0) {
-		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
-		if (mvm->restart_fw > 0) {
-			mvm->restart_fw--;
-			ieee80211_restart_hw(mvm->hw);
-		}
 		iwl_mvm_free_nd(mvm);
+
+		if (!unified_image) {
+			iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+			if (mvm->restart_fw > 0) {
+				mvm->restart_fw--;
+				ieee80211_restart_hw(mvm->hw);
+			}
+		}
 	}
 out_noreset:
 	mutex_unlock(&mvm->mutex);
@@ -798,7 +798,7 @@ static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
 static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
 					  size_t count, loff_t *ppos)
 {
-	int ret;
+	int __maybe_unused ret;
 
 	mutex_lock(&mvm->mutex);
 
@@ -2075,7 +2075,7 @@ struct iwl_mu_group_mgmt_notif {
 * @system_time: system time on air rise
 * @tsf: TSF on air rise
 * @beacon_timestamp: beacon on air rise
-* @phy_flags: general phy flags: band, modulation, etc.
+* @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition
 * @channel: channel this beacon was received on
 * @rates: rate in ucode internal format
 * @byte_count: frame's byte count
@@ -2084,12 +2084,12 @@ struct iwl_stored_beacon_notif {
 	__le32 system_time;
 	__le64 tsf;
 	__le32 beacon_timestamp;
-	__le16 phy_flags;
+	__le16 band;
 	__le16 channel;
 	__le32 rates;
 	__le32 byte_count;
 	u8 data[MAX_STORED_BEACON_SIZE];
-} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
 
 #define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
 
@@ -406,46 +406,63 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
 	{ .start = 0x00a02400, .end = 0x00a02758 },
 };
 
-static u32 iwl_dump_prph(struct iwl_trans *trans,
-			 struct iwl_fw_error_dump_data **data,
-			 const struct iwl_prph_range *iwl_prph_dump_addr,
-			 u32 range_len)
+static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+				 u32 len_bytes, __le32 *data)
+{
+	u32 i;
+
+	for (i = 0; i < len_bytes; i += 4)
+		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
+}
+
+static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+				u32 len_bytes, __le32 *data)
+{
+	unsigned long flags;
+	bool success = false;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		success = true;
+		_iwl_read_prph_block(trans, start, len_bytes, data);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+
+	return success;
+}
+
+static void iwl_dump_prph(struct iwl_trans *trans,
+			  struct iwl_fw_error_dump_data **data,
+			  const struct iwl_prph_range *iwl_prph_dump_addr,
+			  u32 range_len)
 {
 	struct iwl_fw_error_dump_prph *prph;
 	unsigned long flags;
-	u32 prph_len = 0, i;
+	u32 i;
 
 	if (!iwl_trans_grab_nic_access(trans, &flags))
-		return 0;
+		return;
 
 	for (i = 0; i < range_len; i++) {
 		/* The range includes both boundaries */
 		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
 			iwl_prph_dump_addr[i].start + 4;
-		int reg;
-		__le32 *val;
-
-		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
 
 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
 		(*data)->len = cpu_to_le32(sizeof(*prph) +
 					num_bytes_in_chunk);
 		prph = (void *)(*data)->data;
 		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
-		val = (void *)prph->data;
 
-		for (reg = iwl_prph_dump_addr[i].start;
-		     reg <= iwl_prph_dump_addr[i].end;
-		     reg += 4)
-			*val++ = cpu_to_le32(iwl_read_prph_no_grab(trans,
-								   reg));
+		_iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+				     /* our range is inclusive, hence + 4 */
+				     iwl_prph_dump_addr[i].end -
+				     iwl_prph_dump_addr[i].start + 4,
+				     (void *)prph->data);
 
 		*data = iwl_fw_error_next_data(*data);
 	}
 
 	iwl_trans_release_nic_access(trans, &flags);
-
-	return prph_len;
 }
 
 /*
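
The split into _iwl_read_prph_block() and iwl_read_prph_block() is the usual locked/unlocked helper pair: the underscore variant assumes the caller already holds NIC access (so iwl_dump_prph() can grab it once around many ranges), while the plain variant wraps a single read in its own grab/release. The idiom sketched with a generic mutex standing in for the transport's NIC-access grab (this is not the driver API itself):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_mutex_t nic_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must already hold nic_lock (mirrors _iwl_read_prph_block). */
static void read_block_locked(uint32_t start, uint32_t len_bytes, uint32_t *out)
{
	for (uint32_t i = 0; i < len_bytes; i += 4)
		*out++ = start + i;	/* stand-in for a register read */
}

/* Self-contained variant that takes and drops the lock itself. */
static bool read_block(uint32_t start, uint32_t len_bytes, uint32_t *out)
{
	if (pthread_mutex_lock(&nic_lock) != 0)
		return false;
	read_block_locked(start, len_bytes, out);
	pthread_mutex_unlock(&nic_lock);
	return true;
}
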
@@ -495,11 +512,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	struct iwl_mvm_dump_ptrs *fw_error_dump;
 	struct scatterlist *sg_dump_data;
 	u32 sram_len, sram_ofs;
-	struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
-		mvm->fw->dbg_mem_tlv;
+	const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv;
 	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
-	u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
-	u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
+	u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len;
+	u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->dccm2_len;
 	bool monitor_dump_only = false;
 	int i;
 
@@ -624,10 +640,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
 	/* Make room for MEM segments */
-	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
-		if (fw_dbg_mem[i])
-			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
-				le32_to_cpu(fw_dbg_mem[i]->len);
+	for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+			    le32_to_cpu(fw_dbg_mem[i].len);
 	}
 
 	/* Make room for fw's virtual image pages, if it exists */
@@ -656,7 +671,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    mvm->fw_dump_desc->len;
 
-	if (!mvm->fw->dbg_dynamic_mem)
+	if (!mvm->fw->n_dbg_mem_tlv)
 		file_len += sram_len + sizeof(*dump_mem);
 
 	dump_file = vzalloc(file_len);
@@ -708,7 +723,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	if (monitor_dump_only)
 		goto dump_trans_data;
 
-	if (!mvm->fw->dbg_dynamic_mem) {
+	if (!mvm->fw->n_dbg_mem_tlv) {
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 		dump_mem = (void *)dump_data->data;
@@ -719,22 +734,39 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
-		if (fw_dbg_mem[i]) {
-			u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
-			u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
-
-			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-			dump_data->len = cpu_to_le32(len +
-						     sizeof(*dump_mem));
-			dump_mem = (void *)dump_data->data;
-			dump_mem->type = fw_dbg_mem[i]->data_type;
-			dump_mem->offset = cpu_to_le32(ofs);
-			iwl_trans_read_mem_bytes(mvm->trans, ofs,
-						 dump_mem->data,
-						 len);
-			dump_data = iwl_fw_error_next_data(dump_data);
-		}
-	}
+	for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
+		u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+		u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+		bool success;
+
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+		dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
+		dump_mem = (void *)dump_data->data;
+		dump_mem->type = fw_dbg_mem[i].data_type;
+		dump_mem->offset = cpu_to_le32(ofs);
+
+		switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
+		case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
+			iwl_trans_read_mem_bytes(mvm->trans, ofs,
+						 dump_mem->data,
+						 len);
+			success = true;
+			break;
+		case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
+			success = iwl_read_prph_block(mvm->trans, ofs, len,
+						      (void *)dump_mem->data);
+			break;
+		default:
+			/*
+			 * shouldn't get here, we ignored this kind
+			 * of TLV earlier during the TLV parsing?!
+			 */
+			WARN_ON(1);
+			success = false;
+		}
+
+		if (success)
+			dump_data = iwl_fw_error_next_data(dump_data);
+	}
 
 	if (smem_len) {
@@ -816,11 +848,12 @@ dump_trans_data:
 			     sg_nents(sg_dump_data),
 			     fw_error_dump->op_mode_ptr,
 			     fw_error_dump->op_mode_len, 0);
-	sg_pcopy_from_buffer(sg_dump_data,
-			     sg_nents(sg_dump_data),
-			     fw_error_dump->trans_ptr->data,
-			     fw_error_dump->trans_ptr->len,
-			     fw_error_dump->op_mode_len);
+	if (fw_error_dump->trans_ptr)
+		sg_pcopy_from_buffer(sg_dump_data,
+				     sg_nents(sg_dump_data),
+				     fw_error_dump->trans_ptr->data,
+				     fw_error_dump->trans_ptr->len,
+				     fw_error_dump->op_mode_len);
 	dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len,
 		       GFP_KERNEL);
 }
@@ -190,7 +190,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 	 * CPU2 paging CSS
 	 * CPU2 paging image (including instruction and data)
 	 */
-	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
 		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
 			sec_idx++;
 			break;
@@ -201,7 +201,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 	 * If paging is enabled there should be at least 2 more sections left
 	 * (one for CSS and one for Paging data)
 	 */
-	if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+	if (sec_idx >= image->num_sec - 1) {
 		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
 		iwl_free_fw_paging(mvm);
 		return -EINVAL;
@@ -259,9 +259,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 {
 	struct page *block;
 	dma_addr_t phys = 0;
-	int blk_idx = 0;
-	int order, num_of_pages;
-	int dma_enabled;
+	int blk_idx, order, num_of_pages, size, dma_enabled;
 
 	if (mvm->fw_paging_db[0].fw_paging_block)
 		return 0;
@@ -272,9 +270,8 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
 
 	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
-	mvm->num_of_paging_blk = ((num_of_pages - 1) /
-				    NUM_OF_PAGE_PER_GROUP) + 1;
-
+	mvm->num_of_paging_blk =
+		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
 	mvm->num_of_pages_in_last_blk =
 		num_of_pages -
 		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
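
The DIV_ROUND_UP() substitution is behavior-preserving here: for any num_of_pages >= 1 the two expressions agree (they differ only at zero, which cannot happen once paging_mem_size is nonzero). A quick self-check in plain C, assuming the kernel's definition of the macro:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	for (int n = 1; n < 10000; n++)
		assert(DIV_ROUND_UP(n, 32) == ((n - 1) / 32) + 1);
	return 0;
}
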
@@ -284,46 +281,13 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 		     mvm->num_of_paging_blk,
 		     mvm->num_of_pages_in_last_blk);
 
-	/* allocate block of 4Kbytes for paging CSS */
-	order = get_order(FW_PAGING_SIZE);
-	block = alloc_pages(GFP_KERNEL, order);
-	if (!block) {
-		/* free all the previous pages since we failed */
-		iwl_free_fw_paging(mvm);
-		return -ENOMEM;
-	}
-
-	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
-	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
-
-	if (dma_enabled) {
-		phys = dma_map_page(mvm->trans->dev, block, 0,
-				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(mvm->trans->dev, phys)) {
-			/*
-			 * free the previous pages and the current one since
-			 * we failed to map_page.
-			 */
-			iwl_free_fw_paging(mvm);
-			return -ENOMEM;
-		}
-		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
-	} else {
-		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
-			blk_idx << BLOCK_2_EXP_SIZE;
-	}
-
-	IWL_DEBUG_FW(mvm,
-		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
-		     order);
-
 	/*
-	 * allocate blocks in dram.
-	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
+	 * Allocate CSS and paging blocks in dram.
 	 */
-	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
-		/* allocate block of PAGING_BLOCK_SIZE (32K) */
-		order = get_order(PAGING_BLOCK_SIZE);
+	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+		order = get_order(size);
 		block = alloc_pages(GFP_KERNEL, order);
 		if (!block) {
 			/* free all the previous pages since we failed */
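
Folding the CSS allocation into the main loop works because the two paths differed only in block size: index 0 is now the 4KB CSS block and indices 1..num_of_paging_blk are 32KB paging blocks, as the hunk above shows. The sizing rule in isolation (a sketch; constants follow the driver's paging layout):

#include <stdio.h>

#define FW_PAGING_SIZE		4096	/* CSS block */
#define PAGING_BLOCK_SIZE	32768	/* paging data blocks */

int main(void)
{
	int num_of_paging_blk = 3;	/* data blocks, CSS excluded */

	/* one loop: blk_idx 0 is the CSS, the rest are data blocks */
	for (int blk_idx = 0; blk_idx < num_of_paging_blk + 1; blk_idx++) {
		int size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;

		printf("block %d: %d bytes\n", blk_idx, size);
	}
	return 0;
}
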
@@ -332,7 +296,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 		}
 
 		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
-		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+		mvm->fw_paging_db[blk_idx].fw_paging_size = size;
 
 		if (dma_enabled) {
 			phys = dma_map_page(mvm->trans->dev, block, 0,
@@ -353,9 +317,14 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 				blk_idx << BLOCK_2_EXP_SIZE;
 		}
 
-		IWL_DEBUG_FW(mvm,
-			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
-			     order);
+		if (!blk_idx)
+			IWL_DEBUG_FW(mvm,
+				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+				     order);
+		else
+			IWL_DEBUG_FW(mvm,
+				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+				     order);
 	}
 
 	return 0;
@@ -1565,7 +1565,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
 	rx_status.device_timestamp = le32_to_cpu(sb->system_time);
 	rx_status.band =
-		(sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+		(sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
 		NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 	rx_status.freq =
 		ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
@@ -463,6 +463,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 			IEEE80211_RADIOTAP_MCS_HAVE_STBC;
 	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+	hw->radiotap_timestamp.units_pos =
+		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+	/* this is the case for CCK frames, it's better (only 8) for OFDM */
+	hw->radiotap_timestamp.accuracy = 22;
+
 	hw->rate_control_algorithm = "iwl-mvm-rs";
 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
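
This is the radiotap-timestamp feature from the tag summary: the driver advertises that its mactime is counted in microseconds, sampled at PLCP signal acquisition, with a ±22µs accuracy bound (the CCK worst case; the in-line comment notes OFDM would be about 8). A sketch of how the two halves of units_pos combine; the flag values below are illustrative stand-ins only, the real ones live in mac80211's ieee80211_radiotap definitions:

#include <stdint.h>

/* Illustrative stand-ins for the IEEE80211_RADIOTAP_TIMESTAMP_* flags. */
#define TS_UNIT_US		0x0001	/* timestamps counted in microseconds */
#define TS_SPOS_PLCP_SIG_ACQ	0x0010	/* sampled at PLCP signal acquisition */

struct radiotap_ts_caps {
	uint16_t units_pos;	/* unit OR'ed with sampling position */
	uint8_t accuracy;	/* +/- bound, in the advertised unit */
};

static struct radiotap_ts_caps caps = {
	.units_pos = TS_UNIT_US | TS_SPOS_PLCP_SIG_ACQ,
	.accuracy = 22,		/* CCK worst case; ~8 us for OFDM */
};
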
@@ -670,7 +677,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		hw->wiphy->wowlan = &mvm->wowlan;
 	}
 
-	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+	if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
 	    mvm->trans->ops->d3_suspend &&
 	    mvm->trans->ops->d3_resume &&
 	    device_can_wakeup(mvm->trans->dev)) {
@@ -1657,8 +1657,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
  * Disable a TXQ.
  * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
  */
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			 u8 tid, u8 flags);
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u8 tid, u8 flags);
 int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
 
 /* Return a bitmask with all the hw supported queues, except for the
@@ -161,9 +161,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			  struct rs_rate *rate,
 			  const struct rs_tx_column *next_col)
 {
-	struct iwl_mvm_sta *mvmsta;
-	struct iwl_mvm_vif *mvmvif;
-
 	if (!sta->ht_cap.ht_supported)
 		return false;
 
@@ -176,9 +173,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
 		return false;
 
-	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
 	if (mvm->nvm_data->sku_cap_mimo_disabled)
 		return false;
 
@@ -3071,7 +3065,7 @@ static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
 
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
 {
-	u8 nss = 0, mcs = 0;
+	u8 nss = 0;
 
 	spin_lock(&mvm->drv_stats_lock);
 
@@ -3099,11 +3093,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
 
 	if (rate & RATE_MCS_HT_MSK) {
 		mvm->drv_rx_stats.ht_frames++;
-		mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
 		nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
 	} else if (rate & RATE_MCS_VHT_MSK) {
 		mvm->drv_rx_stats.vht_frames++;
-		mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
 		nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
 		       RATE_VHT_MCS_NSS_POS) + 1;
 	} else {
@@ -621,12 +621,10 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
 	};
 	int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) :
 			    sizeof(struct iwl_notif_statistics_v10);
-	u32 temperature;
 
 	if (iwl_rx_packet_payload_len(pkt) != expected_size)
 		goto invalid;
 
-	temperature = le32_to_cpu(stats->general.radio_temperature);
 	data.mac_id = stats->rx.general.mac_id;
 	data.beacon_filter_average_energy =
 		stats->general.beacon_filter_average_energy;
@@ -454,13 +454,6 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 
 	rcu_read_unlock();
 
-	spin_lock_bh(&mvm->queue_info_lock);
-	/* Unmap MAC queues and TIDs from this queue */
-	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
-	mvm->queue_info[queue].hw_queue_refcount = 0;
-	mvm->queue_info[queue].tid_bitmap = 0;
-	spin_unlock_bh(&mvm->queue_info_lock);
-
 	return disable_agg_tids;
 }
 
@@ -755,28 +748,22 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	 * first
 	 */
 	if (using_inactive_queue) {
-		struct iwl_scd_txq_cfg_cmd cmd = {
-			.scd_queue = queue,
-			.action = SCD_CFG_DISABLE_QUEUE,
-		};
-		u8 txq_curr_ac;
-
-		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+		u8 txq_curr_ac, sta_id;
 
 		spin_lock_bh(&mvm->queue_info_lock);
 		txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
-		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
-		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
-		cmd.tid = mvm->queue_info[queue].txq_tid;
+		sta_id = mvm->queue_info[queue].ra_sta_id;
 		spin_unlock_bh(&mvm->queue_info_lock);
 
+		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
 		/* Disable the queue */
 		if (disable_agg_tids)
 			iwl_mvm_invalidate_sta_queue(mvm, queue,
 						     disable_agg_tids, false);
-		iwl_trans_txq_disable(mvm->trans, queue, false);
-		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
-					   &cmd);
+
+		ret = iwl_mvm_disable_txq(mvm, queue,
+					  mvmsta->vif->hw_queue[txq_curr_ac],
+					  tid, 0);
 		if (ret) {
 			IWL_ERR(mvm,
 				"Failed to free inactive queue %d (ret=%d)\n",
@@ -791,7 +778,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 		}
 
 		/* If TXQ is allocated to another STA, update removal in FW */
-		if (cmd.sta_id != mvmsta->sta_id)
+		if (sta_id != mvmsta->sta_id)
 			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
 	}
 
@@ -868,7 +855,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
 		.scd_queue = queue,
 		.action = SCD_CFG_UPDATE_QUEUE_TID,
 	};
-	s8 sta_id;
 	int tid;
 	unsigned long tid_bitmap;
 	int ret;
@@ -876,7 +862,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
 	lockdep_assert_held(&mvm->mutex);
 
 	spin_lock_bh(&mvm->queue_info_lock);
-	sta_id = mvm->queue_info[queue].ra_sta_id;
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
@@ -102,14 +102,13 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
 #define OPT_HDR(type, skb, off) \
 	(type *)(skb_network_header(skb) + (off))
 
-static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
-			    struct ieee80211_hdr *hdr,
-			    struct ieee80211_tx_info *info,
-			    struct iwl_tx_cmd *tx_cmd)
+static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+			   struct ieee80211_hdr *hdr,
+			   struct ieee80211_tx_info *info)
 {
+	u16 offload_assist = 0;
 #if IS_ENABLED(CONFIG_INET)
 	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
-	u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
 	u8 protocol = 0;
 
 	/*
@@ -117,7 +116,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
 	 * compute it
 	 */
 	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
-		return;
+		goto out;
 
 	/* We do not expect to be requested to csum stuff we do not support */
 	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
@@ -125,7 +124,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
 		      skb->protocol != htons(ETH_P_IPV6)),
 		      "No support for requested checksum\n")) {
 		skb_checksum_help(skb);
-		return;
+		goto out;
 	}
 
 	if (skb->protocol == htons(ETH_P_IP)) {
@@ -145,7 +144,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
 			    protocol != NEXTHDR_HOP &&
 			    protocol != NEXTHDR_DEST) {
 				skb_checksum_help(skb);
-				return;
+				goto out;
 			}
 
 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
@@ -159,7 +158,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
 		WARN_ON_ONCE(1);
 		skb_checksum_help(skb);
-		return;
+		goto out;
 	}
 
 	/* enable L4 csum */
@@ -191,8 +190,9 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
 	mh_len /= 2;
 	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
 
-	tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+out:
 #endif
+	return offload_assist;
 }
 
 /*
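
Returning the offload_assist bits instead of writing tx_cmd inside iwl_mvm_tx_csum() gives the caller a single merge point, and the out: label keeps the CONFIG_INET-disabled build returning 0. The shape of that refactor in miniature (HAVE_INET stands in for IS_ENABLED(CONFIG_INET); everything else is illustrative):

#include <stdint.h>

static uint16_t compute_csum_bits(int can_offload)
{
	uint16_t bits = 0;

#ifdef HAVE_INET
	if (!can_offload)
		goto out;
	bits |= 1u << 0;	/* e.g. an enable-L4-checksum flag */
out:
#endif
	return bits;		/* 0 whenever the feature is compiled out */
}

static uint16_t tx_flags;

void build_tx_cmd(int can_offload)
{
	tx_flags |= compute_csum_bits(can_offload);	/* one merge point */
}
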
@@ -295,7 +295,52 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
 		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
 
-	iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
+	tx_cmd->offload_assist |=
+		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info));
 }
 
+static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
+			       struct ieee80211_tx_info *info,
+			       struct ieee80211_sta *sta)
+{
+	int rate_idx;
+	u8 rate_plcp;
+	u32 rate_flags;
+
+	/* HT rate doesn't make sense for a non data frame */
+	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
+		  info->control.rates[0].flags,
+		  info->control.rates[0].idx);
+
+	rate_idx = info->control.rates[0].idx;
+	/* if the rate isn't a well known legacy rate, take the lowest one */
+	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+		rate_idx = rate_lowest_index(
+				&mvm->nvm_data->bands[info->band], sta);
+
+	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+	if (info->band == NL80211_BAND_5GHZ)
+		rate_idx += IWL_FIRST_OFDM_RATE;
+
+	/* For 2.4 GHZ band, check that there is no need to remap */
+	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+	if (info->band == NL80211_BAND_2GHZ &&
+	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+	else
+		rate_flags =
+			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+	/* Set CCK flag as needed */
+	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	return (u32)rate_plcp | rate_flags;
+}
+
 /*
@@ -305,10 +350,6 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 			    struct ieee80211_tx_info *info,
 			    struct ieee80211_sta *sta, __le16 fc)
 {
-	u32 rate_flags;
-	int rate_idx;
-	u8 rate_plcp;
-
 	/* Set retry limit on RTS packets */
 	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
 
@@ -337,46 +378,12 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
 	}
 
-	/* HT rate doesn't make sense for a non data frame */
-	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
-		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
-		  info->control.rates[0].flags,
-		  info->control.rates[0].idx,
-		  le16_to_cpu(fc));
-
-	rate_idx = info->control.rates[0].idx;
-	/* if the rate isn't a well known legacy rate, take the lowest one */
-	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
-		rate_idx = rate_lowest_index(
-				&mvm->nvm_data->bands[info->band], sta);
-
-	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-	if (info->band == NL80211_BAND_5GHZ)
-		rate_idx += IWL_FIRST_OFDM_RATE;
-
-	/* For 2.4 GHZ band, check that there is no need to remap */
-	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
-
-	/* Get PLCP rate for tx_cmd->rate_n_flags */
-	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
-
 	mvm->mgmt_last_antenna_idx =
 		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
 				     mvm->mgmt_last_antenna_idx);
 
-	if (info->band == NL80211_BAND_2GHZ &&
-	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
-	else
-		rate_flags =
-			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
-
-	/* Set CCK flag as needed */
-	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
-		rate_flags |= RATE_MCS_CCK_MSK;
-
 	/* Set the rate in the TX cmd */
-	tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
+	tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
 }
 
 static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -693,10 +693,6 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			.tid = cfg->tid,
 		};
 
-		/* Set sta_id in the command, if it exists */
-		if (iwl_mvm_is_dqa_supported(mvm))
-			cmd.sta_id = cfg->sta_id;
-
 		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
 					 wdg_timeout);
 		WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
@@ -706,8 +702,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	}
 }
 
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-			 u8 tid, u8 flags)
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u8 tid, u8 flags)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
@@ -720,7 +716,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
 	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
 		spin_unlock_bh(&mvm->queue_info_lock);
-		return;
+		return 0;
 	}
 
 	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
@@ -760,7 +756,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	/* If the queue is still enabled - nothing left to do in this func */
 	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
 		spin_unlock_bh(&mvm->queue_info_lock);
-		return;
+		return 0;
 	}
 
 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -791,6 +787,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	if (ret)
 		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
 			queue, ret);
+
+	return ret;
 }
 
 /**
@@ -805,7 +805,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 		(*first_ucode_section)++;
 	}
 
-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
 		last_read_idx = i;
 
 		/*
@@ -868,19 +868,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 				      int cpu,
 				      int *first_ucode_section)
 {
-	int shift_param;
 	int i, ret = 0;
 	u32 last_read_idx = 0;
 
-	if (cpu == 1) {
-		shift_param = 0;
+	if (cpu == 1)
 		*first_ucode_section = 0;
-	} else {
-		shift_param = 16;
+	else
 		(*first_ucode_section)++;
-	}
 
-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
 		last_read_idx = i;
 
 		/*
@@ -1066,6 +1062,20 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 					       &first_ucode_section);
 }
 
+static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+{
+	bool hw_rfkill = iwl_is_rfkill_set(trans);
+
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans->status);
+
+	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+	return hw_rfkill;
+}
+
 static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
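
iwl_trans_check_hw_rf_kill() folds the read-state / update-status-bit / notify-op-mode sequence that start_fw and start_hw each open-coded (see the hunks below) into one helper that also returns the state. The refactor's shape in miniature, with plain-C stand-ins rather than the driver API:

#include <stdbool.h>

#define STATUS_RFKILL (1ul << 0)

static bool rfkill_asserted;	/* stand-in for the hardware register */
static unsigned long status;

static void notify_rf_kill(bool on) { (void)on; /* op-mode notification */ }

static bool check_hw_rf_kill(void)
{
	bool hw_rfkill = rfkill_asserted;	/* read the state once */

	if (hw_rfkill)
		status |= STATUS_RFKILL;	/* keep the status bit in sync */
	else
		status &= ~STATUS_RFKILL;

	notify_rf_kill(hw_rfkill);		/* tell the op mode */
	return hw_rfkill;			/* callers branch on this */
}
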
@@ -1208,12 +1218,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	mutex_lock(&trans_pcie->mutex);
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
 	if (hw_rfkill && !run_in_rfkill) {
 		ret = -ERFKILL;
 		goto out;
@@ -1261,13 +1266,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	ret = iwl_pcie_load_given_ucode(trans, fw);
 
 	/* re-check RF-Kill state since we may have missed the interrupt */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
 	if (hw_rfkill && !run_in_rfkill)
 		ret = -ERFKILL;
 
@@ -1659,7 +1658,6 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	bool hw_rfkill;
 	int err;
 
 	lockdep_assert_held(&trans_pcie->mutex);
@@ -1683,13 +1681,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 	/* Set is_down to false here so that...*/
 	trans_pcie->is_down = false;
 
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	/* ... rfkill can call stop_device and set it false if needed */
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	/* ...rfkill can call stop_device and set it false if needed */
+	iwl_trans_check_hw_rf_kill(trans);
 
 	/* Make sure we sync here, because we'll need full access later */
 	if (low_power)