Merge tag 'wireless-drivers-next-for-davem-2017-04-21' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.12

Quite a lot of patches for rtlwifi and iwlwifi this time, but changes
also for other active wireless drivers.

Major changes:

ath9k

* add support for Dell Wireless 1601 PCI device

* add debugfs file to manually override noise floor

ath10k

* bump up FW API to 6 for a new QCA6174 firmware branch

wil6210

* support 8 kB RX buffers

iwlwifi

* work to support A000 devices continues

* add support for FW API 30

* add Geographical and Dynamic Specific Absorption Rate (SAR) support

* support a few new PCI device IDs

rtlwifi

* work on adding Bluetooth coexistence support, not finished yet
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-04-24 12:25:01 -04:00
commit ac2291ce1f
123 changed files with 8801 additions and 2212 deletions

View File

@ -7887,7 +7887,7 @@ S: Maintained
F: drivers/net/ethernet/marvell/mvneta.*
MARVELL MWIFIEX WIRELESS DRIVER
M: Amitkumar Karwar <akarwar@marvell.com>
M: Amitkumar Karwar <amitkarwar@gmail.com>
M: Nishant Sarmukadam <nishants@marvell.com>
M: Ganapathi Bhat <gbhat@marvell.com>
M: Xinming Hu <huxm@marvell.com>

View File

@ -640,6 +640,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
napi_enable(&ar->napi);
ath10k_ce_enable_interrupts(ar);
ath10k_pci_enable_legacy_irq(ar);
@ -692,7 +693,6 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce_deinit;
}
napi_enable(&ar->napi);
return 0;

View File

@ -176,7 +176,8 @@ union bmi_resp {
} rompatch_uninstall;
struct {
/* 0 = nothing executed
* otherwise = NVRAM segment return value */
* otherwise = NVRAM segment return value
*/
__le32 result;
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];

View File

@ -261,8 +261,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
}
/*
* Guts of ath10k_ce_send, used by both ath10k_ce_send and
* ath10k_ce_sendlist_send.
* Guts of ath10k_ce_send.
* The caller takes responsibility for any needed locking.
*/
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
@ -1052,7 +1051,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
*/
BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

View File

@ -71,6 +71,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA9887_HW_1_0_VERSION,
@ -91,6 +92,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -110,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -129,6 +132,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_3_0_VERSION,
@ -148,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_3_2_VERSION,
@ -170,6 +175,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@ -195,6 +201,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@ -221,6 +228,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@ -246,6 +254,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@ -265,6 +274,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@ -286,6 +296,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@ -312,6 +323,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
},
};
@ -1627,6 +1639,13 @@ static void ath10k_core_restart(struct work_struct *work)
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
/* TODO: We can have one instance of cancelling coverage_class_work by
* moving it to ath10k_halt(), so that both stop() and restart() would
* call that but it takes conf_mutex() and if we call cancel_work_sync()
* with conf_mutex it will deadlock.
*/
cancel_work_sync(&ar->set_coverage_class_work);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@ -1638,7 +1657,8 @@ static void ath10k_core_restart(struct work_struct *work)
break;
case ATH10K_STATE_OFF:
/* this can happen if driver is being unloaded
* or if the crash happens during FW probing */
* or if the crash happens during FW probing
*/
ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
@ -2166,7 +2186,8 @@ EXPORT_SYMBOL(ath10k_core_stop);
/* mac80211 manages fw/hw initialization through start/stop hooks. However in
* order to know what hw capabilities should be advertised to mac80211 it is
* necessary to load the firmware (and tear it down immediately since start
* hook will try to init it again) before registering */
* hook will try to init it again) before registering
*/
static int ath10k_core_probe_fw(struct ath10k *ar)
{
struct bmi_target_info target_info;
@ -2360,7 +2381,8 @@ void ath10k_core_unregister(struct ath10k *ar)
/* We must unregister from mac80211 before we stop HTC and HIF.
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
* unhappy about callback failures.
*/
ath10k_mac_unregister(ar);
ath10k_testmode_destroy(ar);

View File

@ -501,14 +501,16 @@ enum ath10k_state {
* stopped in ath10k_core_restart() work holding conf_mutex. The state
* RESTARTED means that the device is up and mac80211 has started hw
* reconfiguration. Once mac80211 is done with the reconfiguration we
* set the state to STATE_ON in reconfig_complete(). */
* set the state to STATE_ON in reconfig_complete().
*/
ATH10K_STATE_RESTARTING,
ATH10K_STATE_RESTARTED,
/* The device has crashed while restarting hw. This state is like ON
* but commands are blocked in HTC and -ECOMM response is given. This
* prevents completion timeouts and makes the driver more responsive to
* userspace commands. This is also prevents recursive recovery. */
* userspace commands. This is also prevents recursive recovery.
*/
ATH10K_STATE_WEDGED,
/* factory tests */
@ -920,7 +922,8 @@ struct ath10k {
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock */
* access protected by data_lock
*/
u32 survey_last_rx_clear_count;
u32 survey_last_cycle_count;
struct survey_info survey[ATH10K_NUM_CHANS];

View File

@ -1816,7 +1816,7 @@ static void ath10k_tpc_stats_fill(struct ath10k *ar,
tpc_stats->num_tx_chain,
tpc_stats->rate_max);
for (j = 0; j < tpc_stats->num_tx_chain ; j++) {
for (j = 0; j < WMI_TPC_FLAG; j++) {
switch (j) {
case WMI_TPC_TABLE_TYPE_CDD:
if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
@ -1982,7 +1982,8 @@ void ath10k_debug_stop(struct ath10k *ar)
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
* warning from del_timer().
*/
if (ar->debug.htt_stats_mask != 0)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
@ -2443,86 +2444,82 @@ int ath10k_debug_register(struct ath10k *ar)
init_completion(&ar->debug.tpc_complete);
init_completion(&ar->debug.fw_stats_complete);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_stats);
debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_reset_stats);
debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_reset_stats);
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash);
debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
&fops_simulate_fw_crash);
debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_crash_dump);
debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_crash_dump);
debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_reg_addr);
debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
&fops_reg_addr);
debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_reg_value);
debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar,
&fops_reg_value);
debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_mem_value);
debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar,
&fops_mem_value);
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar,
&fops_chip_id);
debugfs_create_file("htt_stats_mask", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_htt_stats_mask);
debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar,
&fops_htt_stats_mask);
debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar,
&fops_htt_max_amsdu_ampdu);
debugfs_create_file("fw_dbglog", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_fw_dbglog);
debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar,
&fops_fw_dbglog);
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
&fops_cal_data);
debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_ani_enable);
debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ani_enable);
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
&fops_nf_cal_period);
if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_simulate_radar);
debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
ar, &fops_simulate_radar);
debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
debugfs_create_bool("dfs_block_radar_events", 0200,
ar->debug.debugfs_phy,
&ar->dfs_block_radar_events);
debugfs_create_file("dfs_stats", S_IRUSR,
ar->debug.debugfs_phy, ar,
debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_dfs_stats);
}
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
&fops_pktlog_filter);
debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_quiet_period);
debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
&fops_quiet_period);
debugfs_create_file("tpc_stats", S_IRUSR,
ar->debug.debugfs_phy, ar, &fops_tpc_stats);
debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_tpc_stats);
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_btcoex);
debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
&fops_btcoex);
if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar,
debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
&fops_peer_stats);
debugfs_create_file("fw_checksums", S_IRUSR,
ar->debug.debugfs_phy, ar, &fops_fw_checksums);
debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_checksums);
return 0;
}

View File

@ -372,11 +372,10 @@ static const struct file_operations fops_peer_debug_trigger = {
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
&fops_aggr_mode);
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
&fops_peer_debug_trigger);
}

View File

@ -54,7 +54,8 @@ struct ath10k_hif_ops {
int (*start)(struct ath10k *ar);
/* Clean up what start() did. This does not revert to BMI phase. If
* desired so, call power_down() and power_up() */
* desired so, call power_down() and power_up()
*/
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
@ -82,7 +83,8 @@ struct ath10k_hif_ops {
int (*power_up)(struct ath10k *ar);
/* Power down the device and free up resources. stop() must be called
* before this if start() was called earlier */
* before this if start() was called earlier
*/
void (*power_down)(struct ath10k *ar);
int (*suspend)(struct ath10k *ar);

View File

@ -119,6 +119,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc insufficient credits ep %d required %d available %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
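
For context, HTC transmit flow control is credit based: each frame consumes DIV_ROUND_UP(skb->len, htc->target_credit_size) credits, and the debug print added above fires when the endpoint's balance falls short. A standalone arithmetic sketch (the sizes are illustrative assumptions; the real target_credit_size is negotiated with the firmware at HTC ready time):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int target_credit_size = 1536;  /* assumed credit quantum */
        int skb_len = 1600;             /* assumed frame length */

        /* 1600 bytes spill past one 1536-byte credit, so two are needed */
        printf("need %d credits\n", DIV_ROUND_UP(skb_len, target_credit_size));
        return 0;
}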
@ -419,7 +422,8 @@ static void ath10k_htc_control_rx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint. */
* endpoint.
*/
ath10k_warn(ar, "unexpected htc rx\n");
kfree_skb(skb);
}

View File

@ -51,7 +51,8 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
/* This command is used for sending management frames in HTT < 3.0.
* HTT >= 3.0 uses TX_FRM for everything. */
* HTT >= 3.0 uses TX_FRM for everything.
*/
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,
@ -910,7 +911,8 @@ struct htt_rx_test {
/* payload consists of 2 lists:
* a) num_ints * sizeof(__le32)
* b) num_chars * sizeof(u8) aligned to 4bytes */
* b) num_chars * sizeof(u8) aligned to 4bytes
*/
u8 payload[0];
} __packed;
@ -1307,7 +1309,8 @@ struct htt_frag_desc_bank_id {
} __packed;
/* real is 16 but it wouldn't fit in the max htt message size
* so we use a conservatively safe value for now */
* so we use a conservatively safe value for now
*/
#define HTT_FRAG_DESC_BANK_MAX 4
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
@ -1684,12 +1687,14 @@ struct ath10k_htt {
DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
/* set if host-fw communication goes haywire
* used to avoid further failures */
* used to avoid further failures
*/
bool rx_confused;
atomic_t num_mpdus_ready;
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
* in batches to reduce cache stalls
*/
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
struct sk_buff_head tx_fetch_ind_q;
@ -1725,11 +1730,13 @@ struct ath10k_htt {
/* This structure layout is programmed via rx ring setup
* so that FW knows how to transfer the rx descriptor to the host.
* Buffers like this are placed on the rx ring. */
* Buffers like this are placed on the rx ring.
*/
struct htt_rx_desc {
union {
/* This field is filled on the host using the msdu buffer
* from htt_rx_indication */
* from htt_rx_indication
*/
struct fw_rx_desc_base fw_desc;
u32 pad;
} __packed;
@ -1760,7 +1767,8 @@ struct htt_rx_desc {
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely. */
* aggregated traffic more nicely.
*/
#define ATH10K_HTT_MAX_NUM_REFILL 100
/*

View File

@ -177,7 +177,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
* automatically balances load wrt to CPU power.
*
* This probably comes at a cost of lower maximum throughput but
* improves the average and stability. */
* improves the average and stability.
*/
spin_lock_bh(&htt->rx_ring.lock);
num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
@ -304,7 +305,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
rx_desc = (struct htt_rx_desc *)msdu->data;
/* FIXME: we must report msdu payload since this is what caller
* expects now */
* expects now
*/
skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
@ -639,7 +641,8 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
TODO check this */
* TODO check this
*/
bw = info2 & 3;
sgi = info3 & 1;
group_id = (info2 >> 4) & 0x3F;

View File

@ -526,7 +526,8 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
memset(req, 0, sizeof(*req));
/* currently we support only max 8 bit masks so no need to worry
* about endian support */
* about endian support
*/
req->upload_types[0] = mask;
req->reset_types[0] = mask;
req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
@ -1008,7 +1009,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
* There is simply no point in pushing HTT TX_FRM through HTC tx path
* as it's a waste of resources. By bypassing HTC it is possible to
* avoid extra memory allocations, compress data structures and thus
* improve performance. */
* improve performance.
*/
txbuf->htc_hdr.eid = htt->eid;
txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +

View File

@ -129,7 +129,7 @@ enum qca9377_chip_id_rev {
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_FILE_BASE "firmware"
#define ATH10K_FW_API_MAX 5
#define ATH10K_FW_API_MAX 6
#define ATH10K_FW_API_MIN 2
#define ATH10K_FW_API2_FILE "firmware-2.bin"
@ -141,6 +141,9 @@ enum qca9377_chip_id_rev {
/* HTT id conflict fix for management frames over HTT */
#define ATH10K_FW_API5_FILE "firmware-5.bin"
/* the firmware-6.bin blob */
#define ATH10K_FW_API6_FILE "firmware-6.bin"
#define ATH10K_FW_UTF_FILE "utf.bin"
#define ATH10K_FW_UTF_API2_FILE "utf-2.bin"
@ -296,7 +299,8 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
* param, llc/snap) are aligned to 4byte boundaries each */
* param, llc/snap) are aligned to 4byte boundaries each
*/
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
@ -447,6 +451,9 @@ struct ath10k_hw_params {
/* hw specific clock control parameters */
const struct ath10k_hw_clk_params *hw_clk;
int target_cpu_freq;
/* Number of bytes to be discarded for each FFT sample */
int spectral_bin_discard;
};
struct htt_rx_desc;

View File

@ -457,7 +457,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
for (;;) {
/* since ath10k_install_key we can't hold data_lock all the
* time, so we try to remove the keys incrementally */
* time, so we try to remove the keys incrementally
*/
spin_lock_bh(&ar->data_lock);
i = 0;
list_for_each_entry(peer, &ar->peers, list) {
@ -609,7 +610,8 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
case 2:
case 3:
/* Our lower layer calculations limit our precision to
1 microsecond */
* 1 microsecond
*/
return 1;
case 4:
return 2;
@ -978,7 +980,8 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.channel.band_center_freq2 = chandef->center_freq2;
/* TODO setup this dynamically, what in case we
don't have any vifs? */
* don't have any vifs?
*/
arg.channel.mode = chan_to_phymode(chandef);
arg.channel.chan_radar =
!!(channel->flags & IEEE80211_CHAN_RADAR);
@ -2373,9 +2376,10 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
}
/* TODO setup this based on STA listen interval and
beacon interval. Currently we don't know
sta->listen_interval - mac80211 patch required.
Currently use 10 seconds */
* beacon interval. Currently we don't know
* sta->listen_interval - mac80211 patch required.
* Currently use 10 seconds
*/
ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
10);
@ -2451,6 +2455,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
enum nl80211_band band;
const u16 *vht_mcs_mask;
u8 ampdu_factor;
u8 max_nss, vht_mcs;
int i;
if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
return;
@ -2478,7 +2484,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
* zero in VHT IE. Using it would result in degraded throughput.
* arg->peer_max_mpdu at this point contains HT max_mpdu so keep
* it if VHT max_mpdu is smaller. */
* it if VHT max_mpdu is smaller.
*/
arg->peer_max_mpdu = max(arg->peer_max_mpdu,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
@ -2489,6 +2496,18 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->peer_flags |= ar->wmi.peer_flags->bw160;
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
(2 * i) & 3;
if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
vht_mcs_mask[i])
max_nss = i + 1;
}
arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
arg->peer_vht_rates.rx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->peer_vht_rates.rx_mcs_set =
@ -2779,7 +2798,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
}
/* ap_sta must be accessed only within rcu section which must be left
* before calling ath10k_setup_peer_smps() which might sleep. */
* before calling ath10k_setup_peer_smps() which might sleep.
*/
ht_cap = ap_sta->ht_cap;
vht_cap = ap_sta->vht_cap;
@ -3050,7 +3070,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
/* FIXME: why use only legacy modes, why not any
* HT/VHT modes? Would that even make any
* difference? */
* difference?
*/
if (channel->band == NL80211_BAND_2GHZ)
ch->mode = MODE_11G;
else
@ -3114,7 +3135,8 @@ static void ath10k_regd_update(struct ath10k *ar)
}
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
* a combined one only
*/
ret = ath10k_wmi_pdev_set_regdomain(ar,
regpair->reg_domain,
regpair->reg_domain, /* 2ghz */
@ -3663,7 +3685,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
* never transmitted. We delete the peer upon tx completion.
* It is unlikely that a peer for offchannel tx will already be
* present. However it may be in some rare cases so account for that.
* Otherwise we might remove a legitimate peer and break stuff. */
* Otherwise we might remove a legitimate peer and break stuff.
*/
for (;;) {
skb = skb_dequeue(&ar->offchan_tx_queue);
@ -4703,6 +4726,7 @@ static void ath10k_stop(struct ieee80211_hw *hw)
}
mutex_unlock(&ar->conf_mutex);
cancel_work_sync(&ar->set_coverage_class_work);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->restart_work);
}
@ -5702,7 +5726,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
/* the peer should not disappear in mid-way (unless FW goes awry) since
* we already hold conf_mutex. we just make sure its there now. */
* we already hold conf_mutex. we just make sure its there now.
*/
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
@ -5714,8 +5739,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EOPNOTSUPP;
goto exit;
} else {
/* if the peer doesn't exist there is no key to disable
* anymore */
/* if the peer doesn't exist there is no key to disable anymore */
goto exit;
}
}
@ -6574,7 +6598,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs */
* we'll collect those frames either way if we stop/delete vdevs
*/
if (drop)
return;
@ -6625,7 +6650,8 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
/* If device failed to restart it will be in a different state, e.g.
* ATH10K_STATE_WEDGED */
* ATH10K_STATE_WEDGED
*/
if (ar->state == ATH10K_STATE_RESTARTED) {
ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;

View File

@ -720,14 +720,16 @@ void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
/* IMPORTANT: INTR_CLR register has to be set after
* INTR_ENABLE is set to 0, otherwise interrupt can not be
* really cleared. */
* really cleared.
*/
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
0);
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
/* IMPORTANT: this extra read transaction is required to
* flush the posted write buffer. */
* flush the posted write buffer.
*/
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS);
}
@ -739,7 +741,8 @@ void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
/* IMPORTANT: this extra read transaction is required to
* flush the posted write buffer. */
* flush the posted write buffer.
*/
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS);
}
@ -2908,7 +2911,8 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* host won't know when target writes BAR to CORE_CTRL.
* This write might get lost if target has NOT written BAR.
* For now, fix the race by repeating the write in below
* synchronization checking. */
* synchronization checking.
*/
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
@ -3424,6 +3428,7 @@ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

View File

@ -439,19 +439,22 @@ struct rx_mpdu_end {
* c) A-MSDU subframe header (14 bytes) if appliable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
* In case of A-MSDU only first frame in sequence contains (a) and (b). */
* In case of A-MSDU only first frame in sequence contains (a) and (b).
*/
enum rx_msdu_decap_format {
RX_MSDU_DECAP_RAW = 0,
/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
* htt_rx_desc contains the original decapped 802.11 header. */
* htt_rx_desc contains the original decapped 802.11 header.
*/
RX_MSDU_DECAP_NATIVE_WIFI = 1,
/* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
* total), followed by an RFC1042 header (8 bytes). */
* total), followed by an RFC1042 header (8 bytes).
*/
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
@ -867,7 +870,7 @@ struct rx_ppdu_start {
*
* reserved_9
* Reserved: HW should fill with 0, FW should ignore.
*/
*/
#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0)
#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1)
@ -1207,7 +1210,7 @@ struct rx_ppdu_end {
* Every time HW sets this bit in memory FW/SW must clear this
* bit in memory. FW will initialize all the ppdu_done dword
* to 0.
*/
*/
#define FW_RX_DESC_INFO0_DISCARD (1 << 0)
#define FW_RX_DESC_INFO0_FORWARD (1 << 1)

View File

@ -56,6 +56,21 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
return max_exp;
}
static inline size_t ath10k_spectral_fix_bin_size(struct ath10k *ar,
size_t bin_len)
{
/* some chipsets reports bin size as 2^n bytes + 'm' bytes in
* report mode 2. First 2^n bytes carries inband tones and last
* 'm' bytes carries band edge detection data mainly used in
* radar detection purpose. Strip last 'm' bytes to make bin size
* as a valid one. 'm' can take possible values of 4, 12.
*/
if (!is_power_of_2(bin_len))
bin_len -= ar->hw_params.spectral_bin_discard;
return bin_len;
}
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_phyerr_ev_arg *phyerr,
const struct phyerr_fft_report *fftr,
@ -70,18 +85,11 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample = (struct fft_sample_ath10k *)&buf;
bin_len = ath10k_spectral_fix_bin_size(ar, bin_len);
if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
/* qca99x0 reports bin size as 68 bytes (64 bytes + 4 bytes) in
* report mode 2. First 64 bytes carries inband tones (-32 to +31)
* and last 4 byte carries band edge detection data (+32) mainly
* used in radar detection purpose. Strip last 4 byte to make bin
* size is valid one.
*/
if (bin_len == 68)
bin_len -= 4;
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
@ -536,15 +544,15 @@ int ath10k_spectral_create(struct ath10k *ar)
1140, 2500,
&rfs_spec_scan_cb, NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
0600,
ar->debug.debugfs_phy, ar,
&fops_spec_scan_ctl);
debugfs_create_file("spectral_count",
S_IRUSR | S_IWUSR,
0600,
ar->debug.debugfs_phy, ar,
&fops_spectral_count);
debugfs_create_file("spectral_bins",
S_IRUSR | S_IWUSR,
0600,
ar->debug.debugfs_phy, ar,
&fops_spectral_bins);
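
The ath10k_spectral_fix_bin_size() helper above generalizes the old hard-coded "68 - 4" QCA99X0 special case through the new per-chip spectral_bin_discard field (4 or 12 bytes of band-edge data). A standalone sanity sketch of the same logic, using the QCA99X0 numbers from the deleted comment:

#include <assert.h>
#include <stddef.h>

static int is_power_of_2(size_t n)
{
        return n && !(n & (n - 1));
}

static size_t fix_bin_size(size_t bin_len, int discard)
{
        /* a trailing band-edge blob makes the length a non-power-of-2 */
        if (!is_power_of_2(bin_len))
                bin_len -= discard;
        return bin_len;
}

int main(void)
{
        assert(fix_bin_size(68, 4) == 64);      /* QCA99X0: 64 bins + 4 */
        assert(fix_bin_size(64, 4) == 64);      /* already valid: untouched */
        return 0;
}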

View File

@ -268,13 +268,13 @@ struct host_interest {
#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
/*
Fw Mode/SubMode Mask
|-----------------------------------------------------------------------------|
| SUB | SUB | SUB | SUB | | | | |
|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
|-----------------------------------------------------------------------------|
*/
* Fw Mode/SubMode Mask
*-----------------------------------------------------------------------------
* SUB | SUB | SUB | SUB | | | |
*MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]
* (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
*-----------------------------------------------------------------------------
*/
#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_MASK 0x3
#define HI_OPTION_FW_MODE_SHIFT 0xC
@ -428,8 +428,9 @@ Fw Mode/SubMode Mask
#define HI_PWR_SAVE_LPL_ENABLED 0x1
/*b1-b3 reserved*/
/*b4-b5 : dev0 LPL type : 0 - none
1- Reduce Pwr Search
2- Reduce Pwr Listen*/
* 1- Reduce Pwr Search
* 2- Reduce Pwr Listen
*/
/*b6-b7 : dev1 LPL type and so on for Max 8 devices*/
#define HI_PWR_SAVE_LPL_DEV0_LSB 4
#define HI_PWR_SAVE_LPL_DEV_MASK 0x3

View File

@ -124,7 +124,7 @@ void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
complete(&ar->thermal.wmi_sync);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath10k_thermal_show_temp,
NULL, 0);
static struct attribute *ath10k_hwmon_attrs[] = {
@ -191,7 +191,8 @@ int ath10k_thermal_register(struct ath10k *ar)
return 0;
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */
* guess linux/hwmon.h is missing proper stubs.
*/
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;

View File

@ -34,7 +34,8 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
/* If the original wait_for_completion() timed out before
* {data,mgmt}_tx_completed() was called then we could complete
* offchan_tx_completed for a different skb. Prevent this by using
* offchan_tx_skb. */
* offchan_tx_skb.
*/
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
ath10k_warn(ar, "completed old offchannel frame\n");

View File

@ -390,7 +390,8 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
return ret;
/* FIXME There's no ACK event for Management Tx. This probably
* shouldn't be called here either. */
* shouldn't be called here either.
*/
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(ar->hw, msdu);

View File

@ -3210,7 +3210,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
/* if next SWBA has no tim_changed the tim_bitmap is garbage.
* we must copy the bitmap upon change and reuse it later */
* we must copy the bitmap upon change and reuse it later
*/
if (__le32_to_cpu(tim_info->tim_changed)) {
int i;
@ -3529,7 +3530,8 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* before telling mac80211 to decrement CSA counter
*
* Once CSA counter is completed stop sending beacons until
* actual channel switch is done */
* actual channel switch is done
*/
if (arvif->vif->csa_active &&
ieee80211_csa_is_complete(arvif->vif)) {
ieee80211_csa_finish(arvif->vif);
@ -3691,7 +3693,8 @@ radar_detected:
ATH10K_DFS_STAT_INC(ar, radar_detected);
/* Control radar events reporting in debugfs file
dfs_block_radar_events */
* dfs_block_radar_events
*/
if (ar->dfs_block_radar_events) {
ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
return;
@ -4769,9 +4772,10 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
num_units = ar->max_num_peers + 1;
} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
/* number of units to allocate is number of
* peers, 1 extra for self peer on target */
/* this needs to be tied, host and target
* can get out of sync */
* peers, 1 extra for self peer on target
* this needs to be tied, host and target
* can get out of sync
*/
num_units = ar->max_num_peers + 1;
} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
num_units = ar->max_num_vdevs + 1;

View File

@ -1038,7 +1038,8 @@ enum wmi_cmd_id {
WMI_STA_UAPSD_AUTO_TRIG_CMDID,
/* STA Keep alive parameter configuration,
Requires WMI_SERVICE_STA_KEEP_ALIVE */
* Requires WMI_SERVICE_STA_KEEP_ALIVE
*/
WMI_STA_KEEPALIVE_CMD,
/* misc command group */
@ -1774,7 +1775,8 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
break;
/* no default handler to allow compiler to check that the
* enum is fully handled */
* enum is fully handled
*/
};
return "<unknown>";
@ -2974,7 +2976,8 @@ struct wmi_start_scan_arg {
/* When set, DFS channels will not be scanned */
#define WMI_SCAN_BYPASS_DFS_CHN 0x40
/* Different FW scan engine may choose to bail out on errors.
* Allow the driver to have influence over that. */
* Allow the driver to have influence over that.
*/
#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
@ -4447,14 +4450,16 @@ enum wmi_vdev_subtype_10_4 {
/* values for vdev_start_request flags */
/*
* Indicates that AP VDEV uses hidden ssid. only valid for
* AP/GO */
* AP/GO
*/
#define WMI_VDEV_START_HIDDEN_SSID (1 << 0)
/*
* Indicates if robust management frame/management frame
* protection is enabled. For GO/AP vdevs, it indicates that
* it may support station/client associations with RMF enabled.
* For STA/client vdevs, it indicates that sta will
* associate with AP with RMF enabled. */
* associate with AP with RMF enabled.
*/
#define WMI_VDEV_START_PMF_ENABLED (1 << 1)
struct wmi_p2p_noa_descriptor {
@ -4814,7 +4819,8 @@ enum wmi_vdev_param {
* An associated STA is considered unresponsive if there is no recent
* TX/RX activity and downlink frames are buffered for it. Once a STA
* exceeds the maximum unresponsive time, the AP will send a
* WMI_STA_KICKOUT event to the host so the STA can be deleted. */
* WMI_STA_KICKOUT event to the host so the STA can be deleted.
*/
WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
@ -4941,7 +4947,8 @@ enum wmi_10x_vdev_param {
* An associated STA is considered unresponsive if there is no recent
* TX/RX activity and downlink frames are buffered for it. Once a STA
* exceeds the maximum unresponsive time, the AP will send a
* WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
* WMI_10X_STA_KICKOUT event to the host so the STA can be deleted.
*/
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
@ -5605,12 +5612,14 @@ struct wmi_tim_info_arg {
struct wmi_p2p_noa_info {
/* Bit 0 - Flag to indicate an update in NOA schedule
Bits 7-1 - Reserved */
* Bits 7-1 - Reserved
*/
u8 changed;
/* NOA index */
u8 index;
/* Bit 0 - Opp PS state of the AP
Bits 1-7 - Ctwindow in TUs */
* Bits 1-7 - Ctwindow in TUs
*/
u8 ctwindow_oppps;
/* Number of NOA descriptors */
u8 num_descriptors;
@ -6000,7 +6009,8 @@ struct wmi_main_peer_assoc_complete_cmd {
struct wmi_common_peer_assoc_complete_cmd cmd;
/* HT Operation Element of the peer. Five bytes packed in 2
* INT32 array and filled from lsb to msb. */
* INT32 array and filled from lsb to msb.
*/
__le32 peer_ht_info[2];
} __packed;

View File

@ -63,6 +63,7 @@ int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif);
#ifdef CONFIG_ATH6KL_DEBUG
__printf(2, 3)
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
const char *msg, const char *prefix,
@ -83,6 +84,7 @@ int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
__printf(2, 3)
static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
const char *fmt, ...)
{

View File

@ -995,7 +995,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
if (netlen < (payload_len + HTC_HDR_LENGTH)) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"HTC Rx: insufficient length, got:%d expected =%u\n",
"HTC Rx: insufficient length, got:%d expected =%zu\n",
netlen, payload_len + HTC_HDR_LENGTH);
status = -EINVAL;
goto free_skb;

View File

@ -1596,7 +1596,7 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
rate = le32_to_cpu(ev->rate);
pkts = le32_to_cpu(ev->pkts);
ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d% pkts %d intvl %ds\n",
ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d%% pkts %d intvl %ds\n",
vif->bssid, rate, pkts, vif->txe_intvl);
cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts,

View File

@ -254,7 +254,9 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
if (h)
if (ah->nf_override)
nfval = ah->nf_override;
else if (h)
nfval = h[i].privNF;
else
nfval = default_nf;
@ -348,6 +350,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
return 0;
}
EXPORT_SYMBOL(ath9k_hw_loadnf);
static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)

View File

@ -1191,6 +1191,65 @@ static const struct file_operations fops_tpc = {
.llseek = default_llseek,
};
static ssize_t read_file_nf_override(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
char buf[32];
unsigned int len;
if (ah->nf_override == 0)
len = sprintf(buf, "off\n");
else
len = sprintf(buf, "%d\n", ah->nf_override);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_nf_override(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (strncmp("off", buf, 3) == 0)
val = 0;
else if (kstrtol(buf, 0, &val))
return -EINVAL;
if (val > 0)
return -EINVAL;
if (val < -120)
return -EINVAL;
ah->nf_override = val;
if (ah->curchan)
ath9k_hw_loadnf(ah, ah->curchan);
return count;
}
static const struct file_operations fops_nf_override = {
.read = read_file_nf_override,
.write = write_file_nf_override,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@ -1402,5 +1461,8 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->airtime_flags);
debugfs_create_file("nf_override", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_nf_override);
return 0;
}
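
From user space the new knob is driven like any other ath9k debugfs file; writes accept "off" or an integer in [-120, 0]. A hedged usage sketch (the path assumes the usual debugfs mount point and phy0; adjust for your system):

#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/kernel/debug/ieee80211/phy0/ath9k/nf_override";
        FILE *f = fopen(path, "w");

        if (!f)
                return 1;
        fputs("-85\n", f);      /* force the noise floor to -85 dBm */
        fclose(f);

        f = fopen(path, "w");
        if (!f)
                return 1;
        fputs("off\n", f);      /* restore the calibrated noise floor */
        fclose(f);
        return 0;
}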

View File

@ -112,7 +112,7 @@ void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size,
off_t offset, u16 *data)
{
if (offset > blob_size)
if (offset >= blob_size)
return false;
*data = blob[offset];

View File

@ -106,7 +106,7 @@
#define AR9285_RDEXT_DEFAULT 0x1F
#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define FREQ2FBIN(x, y) (u8)((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
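
A worked example of the bin mapping, following directly from the macros above (the added u8 cast just makes the truncation into the EEPROM's 8-bit frequency-bin fields explicit):

#include <assert.h>

typedef unsigned char u8;

#define FREQ2FBIN(x, y) (u8)((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))

int main(void)
{
        assert(FREQ2FBIN(2412, 1) == 112);      /* 2.4 GHz: 2412 - 2300 */
        assert(FREQ2FBIN(5180, 0) == 76);       /* 5 GHz: (5180 - 4800) / 5 */
        assert(FBIN2FREQ(112, 1) == 2412);      /* and back again */
        assert(FBIN2FREQ(76, 0) == 5180);
        return 0;
}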

View File

@ -1220,6 +1220,9 @@ static int send_eject_command(struct usb_interface *interface)
u8 bulk_out_ep;
int r;
if (iface_desc->desc.bNumEndpoints < 2)
return -ENODEV;
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;

View File

@ -803,6 +803,7 @@ struct ath_hw {
u32 rfkill_gpio;
u32 rfkill_polarity;
u32 ah_flags;
s16 nf_override;
bool reset_power_on;
bool htc_reset_init;

View File

@ -383,6 +383,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
0x10CF, /* Fujitsu */
0x1783),
.driver_data = ATH9K_PCI_WOW },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0034,
PCI_VENDOR_ID_DELL,
0x020B),
.driver_data = ATH9K_PCI_WOW },
/* Killer Wireless (2x2) */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,

View File

@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain)
EXPORT_SYMBOL(ath_is_49ghz_allowed);
/* Frequency is one where radar detection is required */
static bool ath_is_radar_freq(u16 center_freq)
static bool ath_is_radar_freq(u16 center_freq,
struct ath_regulatory *reg)
{
if (reg->country_code == CTRY_INDIA)
return (center_freq >= 5500 && center_freq <= 5700);
return (center_freq >= 5260 && center_freq <= 5700);
}
@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ieee80211_channel *ch)
{
if (ath_is_radar_freq(ch->center_freq) ||
if (ath_is_radar_freq(ch->center_freq, reg) ||
(ch->flags & IEEE80211_CHAN_RADAR))
return;
@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
}
}
/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */
static void ath_reg_apply_radar_flags(struct wiphy *wiphy,
struct ath_regulatory *reg)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
if (!ath_is_radar_freq(ch->center_freq))
if (!ath_is_radar_freq(ch->center_freq, reg))
continue;
/* We always enable radar detection/DFS on this
* frequency range. Additionally we also apply on
@ -506,7 +511,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
ath_reg_apply_radar_flags(wiphy, reg);
/*
* This would happen when we have sent a custom regulatory request
@ -654,7 +659,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
}
wiphy_apply_custom_regulatory(wiphy, regd);
ath_reg_apply_radar_flags(wiphy);
ath_reg_apply_radar_flags(wiphy, reg);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
}
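
The regulatory change above narrows the radar-detection window for India only. A minimal sketch of the new predicate (CTRY_INDIA stands in for the driver's country-code enum; the numeric value here is an assumption for illustration):

#include <stdbool.h>
#include <stdio.h>

#define CTRY_INDIA 356  /* assumed ISO numeric code; use the real enum */

static bool is_radar_freq(unsigned int mhz, unsigned int country_code)
{
        if (country_code == CTRY_INDIA)
                return mhz >= 5500 && mhz <= 5700;  /* India: DFS only here */
        return mhz >= 5260 && mhz <= 5700;          /* everyone else */
}

int main(void)
{
        printf("%d\n", is_radar_freq(5260, CTRY_INDIA));    /* 0 */
        printf("%d\n", is_radar_freq(5260, 0));             /* 1 */
        return 0;
}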

View File

@ -1559,12 +1559,6 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_ps_profile_type ps_profile;
int rc;
if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
wil_err(wil, "set_power_mgmt not supported\n");
return -EOPNOTSUPP;
}
wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
enabled, timeout);
@ -1574,11 +1568,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
else
ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
if (rc)
wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
return rc;
return wil_ps_update(wil, ps_profile);
}
static const struct cfg80211_ops wil_cfg80211_ops = {

View File

@ -524,9 +524,8 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
(const volatile void __iomem *)
wil_blob->blob.data + pos, count);
wil_memcpy_fromio_32(buf, (const void __iomem *)
wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
kfree(buf);

View File

@ -554,5 +554,7 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name)
rc = request_firmware(&fw, name, wil_to_dev(wil));
if (!rc)
release_firmware(fw);
return rc != -ENOENT;
else
wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc);
return !rc;
}

View File

@ -130,17 +130,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
u32 *d = dst;
const volatile u32 __iomem *s = src;
/* size_t is unsigned, if (count%4 != 0) it will wrap */
for (count += 4; count > 4; count -= 4)
for (; count >= 4; count -= 4)
*d++ = __raw_readl(s++);
}
void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
const volatile void __iomem *src, size_t count)
{
wil_halp_vote(wil);
wil_memcpy_fromio_32(dst, src, count);
wil_halp_unvote(wil);
if (unlikely(count)) {
/* count can be 1..3 */
u32 tmp = __raw_readl(s);
memcpy(d, &tmp, count);
}
}
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
@ -149,17 +147,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
volatile u32 __iomem *d = dst;
const u32 *s = src;
for (count += 4; count > 4; count -= 4)
for (; count >= 4; count -= 4)
__raw_writel(*s++, d++);
}
void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
volatile void __iomem *dst,
const void *src, size_t count)
{
wil_halp_vote(wil);
wil_memcpy_toio_32(dst, src, count);
wil_halp_unvote(wil);
if (unlikely(count)) {
/* count can be 1..3 */
u32 tmp = 0;
memcpy(&tmp, s, count);
__raw_writel(tmp, d);
}
}
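
The rewritten loops above fix the unsigned-wrap hazard the old "count += 4" form had for lengths that are not a multiple of 4: whole 32-bit words are copied first, then one extra whole-word access is trimmed to the 1..3 remaining bytes. A host-side model of the same shape (plain memory standing in for MMIO and __raw_readl/__raw_writel):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy32(void *dst, const void *src, size_t count)
{
        uint32_t *d = dst;
        const uint32_t *s = src;

        for (; count >= 4; count -= 4)
                *d++ = *s++;            /* whole 32-bit words */
        if (count) {                    /* count is now 1..3 */
                uint32_t tmp;

                memcpy(&tmp, s, 4);     /* one more whole-word read */
                memcpy(d, &tmp, count); /* keep only the tail bytes */
        }
}

int main(void)
{
        char src[8] = "abcdefg", dst[8] = { 0 };

        copy32(dst, src, 7);            /* one full word + 3-byte tail */
        printf("%s\n", dst);            /* prints "abcdefg" */
        return 0;
}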
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
@ -576,6 +573,9 @@ int wil_priv_init(struct wil6210_priv *wil)
if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
return 0;
out_wmi_wq:
@ -903,6 +903,24 @@ void wil_abort_scan(struct wil6210_priv *wil, bool sync)
}
}
int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile)
{
int rc;
if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
wil_err(wil, "set_power_mgmt not supported\n");
return -EOPNOTSUPP;
}
rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
if (rc)
wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
else
wil->ps_profile = ps_profile;
return rc;
}
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@ -948,15 +966,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
mutex_lock(&wil->p2p_wdev_mutex);
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
bitmap_zero(wil->status, wil_status_last);
mutex_unlock(&wil->wmi_mutex);
mutex_lock(&wil->p2p_wdev_mutex);
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
wil_mask_irq(wil);
wmi_event_flush(wil);
@ -1033,6 +1051,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
wil_ps_update(wil, wil->ps_profile);
wil_collect_fw_info(wil);
if (wil->platform_ops.notify) {

View File

@ -71,6 +71,11 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
return 0;
}
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
@ -80,12 +85,24 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
}
}
if (wil->platform_ops.suspend)
/* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */
wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n");
wil_disable_irq(wil);
if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle);
if (rc) {
wil_enable_irq(wil);
goto out;
}
}
set_bit(wil_status_suspended, wil->status);
out:
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
@ -104,12 +121,18 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
}
}
wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
wil_enable_irq(wil);
/* if netif up, bring hardware up
* During open(), IFF_UP set after actual device method
* invocation. This prevent recursive call to wil_up()
* invocation. This prevent recursive call to wil_up().
* wil_status_suspended will be cleared in wil_reset
*/
if (ndev->flags & IFF_UP)
rc = wil_up(wil);
else
clear_bit(wil_status_suspended, wil->status);
out:
wil_dbg_pm(wil, "resume: %s => %d\n",

View File

@ -200,7 +200,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
release_pmc_skbs:
wil_err(wil, "exit on error: Releasing skbs...\n");
for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
descriptor_size,
pmc->descriptors[i].va,
@ -283,7 +283,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
int i;
for (i = 0;
pmc->descriptors[i].va && i < pmc->num_descriptors; i++) {
i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
pmc->descriptor_size,
pmc->descriptors[i].va,

View File

@ -343,8 +343,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
status = WLAN_STATUS_INVALID_QOS_PARAM;
}
if (status == WLAN_STATUS_SUCCESS)
agg_wsize = wil_agg_size(wil, req_agg_wsize);
if (status == WLAN_STATUS_SUCCESS) {
if (req_agg_wsize == 0) {
wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
WIL_MAX_AGG_WSIZE);
agg_wsize = WIL_MAX_AGG_WSIZE;
} else {
agg_wsize = min_t(u16,
WIL_MAX_AGG_WSIZE, req_agg_wsize);
}
}
rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
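
The negotiation above reduces to a small policy: a zero requested window means "you pick", so the driver suggests its maximum; otherwise the request is clamped to the driver's cap. Sketched standalone (WIL_MAX_AGG_WSIZE's value is an assumption here):

#include <stdio.h>

#define WIL_MAX_AGG_WSIZE 64    /* assumed cap; the driver defines the real one */

static unsigned short pick_agg_wsize(unsigned short req)
{
        if (req == 0)           /* peer left it open: suggest our maximum */
                return WIL_MAX_AGG_WSIZE;
        return req < WIL_MAX_AGG_WSIZE ? req : WIL_MAX_AGG_WSIZE;
}

int main(void)
{
        printf("%u\n", pick_agg_wsize(0));      /* 64: suggest the max */
        printf("%u\n", pick_agg_wsize(16));     /* 16: honor the request */
        printf("%u\n", pick_agg_wsize(128));    /* 64: clamp to our cap */
        return 0;
}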

View File

@ -37,6 +37,10 @@ bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
static inline uint wil_rx_snaplen(void)
{
return rx_align_2 ? 6 : 0;
@ -255,7 +259,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d = &vring->va[i].rx;
dma_addr_t pa;
@ -419,7 +423,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
struct sk_buff *skb;
dma_addr_t pa;
unsigned int snaplen = wil_rx_snaplen();
unsigned int sz = mtu_max + ETH_HLEN + snaplen;
unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
u16 dmalen;
u8 ftype;
int cid;
@ -780,6 +784,20 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_rx_refill(wil, v->size);
}
static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
wil->rx_buf_len = rx_large_buf ?
WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
if (mtu_max > wil->rx_buf_len) {
/* do not allow RX buffers to be smaller than mtu_max, for
* backward compatibility (mtu_max parameter was also used
* to support receiving large packets)
*/
wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
wil->rx_buf_len = mtu_max;
}
}
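
In short: the new rx_large_buf module parameter selects the large (8 kB class) buffers, the old default applies otherwise, and a larger mtu_max still wins so existing setups keep working. A condensed sketch of that decision (the three constants belong to the driver; the values below are illustrative assumptions):

#include <stdio.h>

#define WIL_MAX_ETH_MTU         7920    /* assumed large-buffer payload cap */
#define TXRX_BUF_LEN_DEFAULT    2048    /* assumed legacy buffer length */
#define WIL_MAX_MPDU_OVERHEAD   62      /* assumed per-MPDU overhead */

static unsigned int rx_buf_len(int rx_large_buf, unsigned int mtu_max)
{
        unsigned int len = rx_large_buf ?
                WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;

        if (mtu_max > len)      /* mtu_max keeps its old "large RX" meaning */
                len = mtu_max;
        return len;
}

int main(void)
{
        printf("%u\n", rx_buf_len(0, 1500));    /* legacy-sized buffers */
        printf("%u\n", rx_buf_len(1, 1500));    /* 8 kB class buffers */
        return 0;
}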
int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
struct vring *vring = &wil->vring_rx;
@ -792,6 +810,8 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
return -EINVAL;
}
wil_rx_buf_len_init(wil);
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)

View File

@ -32,6 +32,7 @@ extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
@ -411,6 +412,7 @@ enum { /* for wil6210_priv.status */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_last /* keep last */
};
@ -656,6 +658,7 @@ struct wil6210_priv {
struct work_struct probe_client_worker;
/* DMA related */
struct vring vring_rx;
unsigned int rx_buf_len;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
@ -691,6 +694,8 @@ struct wil6210_priv {
/* High Access Latency Policy voting */
struct wil_halp halp;
enum wmi_ps_profile_type ps_profile;
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
@ -799,12 +804,6 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
volatile void __iomem *dst,
const void *src, size_t count);
void *wil_if_alloc(struct device *dev);
void wil_if_free(struct wil6210_priv *wil);
@ -812,6 +811,8 @@ int wil_if_add(struct wil6210_priv *wil);
void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
int wil_ps_update(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile);
int wil_reset(struct wil6210_priv *wil, bool no_fw);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state);

View File

@ -518,16 +518,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
assoc_resp_ielen = 0;
}
mutex_lock(&wil->mutex);
if (test_bit(wil_status_resetting, wil->status) ||
!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "status_resetting, cancel connect event, CID %d\n",
evt->cid);
mutex_unlock(&wil->mutex);
/* no need for cleanup, wil_reset will do that */
return;
}
mutex_lock(&wil->mutex);
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (!test_bit(wil_status_fwconnecting, wil->status)) {
@ -631,6 +631,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
wil->sinfo_gen++;
if (test_bit(wil_status_resetting, wil->status) ||
!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "status_resetting, cancel disconnect event\n");
/* no need for cleanup, wil_reset will do that */
return;
}
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, evt->bssid, reason_code, true);
mutex_unlock(&wil->mutex);
@ -1398,7 +1405,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
struct wmi_cfg_rx_chain_cmd cmd = {
.action = WMI_RX_CHAIN_ADD,
.rx_sw_ring = {
.max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.max_mpdu_size = cpu_to_le16(
wil_mtu2macbuf(wil->rx_buf_len)),
.ring_mem_base = cpu_to_le64(vring->pa),
.ring_size = cpu_to_le16(vring->size),
},

View File

@ -58,6 +58,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
WMI_FW_CAPABILITY_MAX,
};
@ -142,8 +143,6 @@ enum wmi_command_id {
WMI_MAINTAIN_RESUME_CMDID = 0x851,
WMI_RS_MGMT_CMDID = 0x852,
WMI_RF_MGMT_CMDID = 0x853,
WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
WMI_OTP_READ_CMDID = 0x856,
WMI_OTP_WRITE_CMDID = 0x857,
WMI_LED_CFG_CMDID = 0x858,
@ -192,6 +191,8 @@ enum wmi_command_id {
WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
WMI_NEW_STA_CMDID = 0x935,
WMI_DEL_STA_CMDID = 0x936,
WMI_SET_THERMAL_THROTTLING_CFG_CMDID = 0x940,
WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941,
WMI_TOF_SESSION_START_CMDID = 0x991,
WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
WMI_TOF_SET_LCR_CMDID = 0x993,
@ -438,16 +439,6 @@ struct wmi_rf_mgmt_cmd {
__le32 rf_mgmt_type;
} __packed;
/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
struct wmi_thermal_throttling_ctrl_cmd {
__le32 time_on_usec;
__le32 time_off_usec;
__le32 max_txop_length_usec;
} __packed;
/* WMI_RF_RX_TEST_CMDID */
struct wmi_rf_rx_test_cmd {
__le32 sector;
@ -549,7 +540,7 @@ struct wmi_pcp_start_cmd {
u8 hidden_ssid;
u8 is_go;
u8 reserved0[5];
/* abft_len override if non-0 */
/* A-BFT length override if non-0 */
u8 abft_len;
u8 disable_ap_sme;
u8 network_type;
@ -910,6 +901,39 @@ struct wmi_set_mgmt_retry_limit_cmd {
u8 reserved[3];
} __packed;
/* Zones: HIGH, MAX, CRITICAL */
#define WMI_NUM_OF_TT_ZONES (3)
struct wmi_tt_zone_limits {
/* Above this temperature, this zone is active */
u8 temperature_high;
/* Below this temperature, the adjacent lower zone is active */
u8 temperature_low;
u8 reserved[2];
} __packed;
/* Struct used for both configuration and status commands of thermal
* throttling
*/
struct wmi_tt_data {
/* Enable/Disable TT algorithm for baseband */
u8 bb_enabled;
u8 reserved0[3];
/* Define zones for baseband */
struct wmi_tt_zone_limits bb_zones[WMI_NUM_OF_TT_ZONES];
/* Enable/Disable TT algorithm for radio */
u8 rf_enabled;
u8 reserved1[3];
/* Define zones for all radio chips */
struct wmi_tt_zone_limits rf_zones[WMI_NUM_OF_TT_ZONES];
} __packed;
/* WMI_SET_THERMAL_THROTTLING_CFG_CMDID */
struct wmi_set_thermal_throttling_cfg_cmd {
/* Command data */
struct wmi_tt_data tt_data;
} __packed;
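A rough sketch of how a host might fill the new command (the temperature values below are illustrative assumptions, not firmware defaults; note the high/low pairs give hysteresis between adjacent zones):

struct wmi_set_thermal_throttling_cfg_cmd cmd = {
	.tt_data = {
		.bb_enabled = 1,
		.bb_zones = {
			/* HIGH: enter above 60, fall to lower zone below 55 */
			[0] = { .temperature_high = 60, .temperature_low = 55 },
			/* MAX */
			[1] = { .temperature_high = 70, .temperature_low = 65 },
			/* CRITICAL */
			[2] = { .temperature_high = 80, .temperature_low = 75 },
		},
	},
};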
/* WMI_NEW_STA_CMDID */
struct wmi_new_sta_cmd {
u8 dst_mac[WMI_MAC_LEN];
@ -1040,7 +1064,6 @@ enum wmi_event_id {
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
@ -1090,6 +1113,8 @@ enum wmi_event_id {
WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
WMI_SET_THERMAL_THROTTLING_CFG_EVENTID = 0x1940,
WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
WMI_TOF_SET_LCR_EVENTID = 0x1993,
@ -1133,13 +1158,6 @@ struct wmi_rf_mgmt_status_event {
__le32 rf_status;
} __packed;
/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
struct wmi_thermal_throttling_status_event {
__le32 time_on_usec;
__le32 time_off_usec;
__le32 max_txop_length_usec;
} __packed;
/* WMI_GET_STATUS_DONE_EVENTID */
struct wmi_get_status_done_event {
__le32 is_associated;
@ -2206,6 +2224,19 @@ struct wmi_tof_get_capabilities_event {
__le32 aoa_supported_types;
} __packed;
/* WMI_SET_THERMAL_THROTTLING_CFG_EVENTID */
struct wmi_set_thermal_throttling_cfg_event {
/* wmi_fw_status */
u8 status;
u8 reserved[3];
} __packed;
/* WMI_GET_THERMAL_THROTTLING_CFG_EVENTID */
struct wmi_get_thermal_throttling_cfg_event {
/* Status data */
struct wmi_tt_data tt_data;
} __packed;
enum wmi_tof_session_end_status {
WMI_TOF_SESSION_END_NO_ERROR = 0x00,
WMI_TOF_SESSION_END_FAIL = 0x01,

View File

@ -25,7 +25,6 @@ brcmfmac-objs += \
chip.o \
fwil.o \
fweh.o \
fwsignal.o \
p2p.o \
proto.o \
common.o \
@ -36,7 +35,8 @@ brcmfmac-objs += \
vendor.o \
pno.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
bcdc.o
bcdc.o \
fwsignal.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
commonring.o \
flowring.o \

View File

@ -103,9 +103,17 @@ struct brcmf_bcdc {
u8 bus_header[BUS_HEADER_LEN];
struct brcmf_proto_bcdc_dcmd msg;
unsigned char buf[BRCMF_DCMD_MAXLEN];
struct brcmf_fws_info *fws;
};
struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc = drvr->proto->pd;
return bcdc->fws;
}
static int
brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
uint len, bool set)
@ -330,8 +338,9 @@ static int brcmf_proto_bcdc_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
struct sk_buff *skb)
{
struct brcmf_if *ifp = brcmf_get_ifp(drvr, ifidx);
struct brcmf_bcdc *bcdc = drvr->proto->pd;
if (!brcmf_fws_queue_skbs(drvr->fws))
if (!brcmf_fws_queue_skbs(bcdc->fws))
return brcmf_proto_txdata(drvr, ifidx, 0, skb);
return brcmf_fws_process_skb(ifp, skb);
@ -360,15 +369,15 @@ brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
bool success)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_bcdc *bcdc = bus_if->drvr->proto->pd;
struct brcmf_if *ifp;
/* await txstatus signal for firmware if active */
if (brcmf_fws_fc_active(drvr->fws)) {
if (brcmf_fws_fc_active(bcdc->fws)) {
if (!success)
brcmf_fws_bustxfail(drvr->fws, txp);
brcmf_fws_bustxfail(bcdc->fws, txp);
} else {
if (brcmf_proto_bcdc_hdrpull(drvr, false, txp, &ifp))
if (brcmf_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp))
brcmu_pkt_buf_free_skb(txp);
else
brcmf_txfinalize(ifp, txp, success);
@ -420,7 +429,15 @@ brcmf_proto_bcdc_reset_if(struct brcmf_if *ifp)
static int
brcmf_proto_bcdc_init_done(struct brcmf_pub *drvr)
{
return brcmf_fws_init(drvr);
struct brcmf_bcdc *bcdc = drvr->proto->pd;
struct brcmf_fws_info *fws;
fws = brcmf_fws_attach(drvr);
if (IS_ERR(fws))
return PTR_ERR(fws);
bcdc->fws = fws;
return 0;
}
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
@ -464,7 +481,9 @@ fail:
void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
{
brcmf_fws_deinit(drvr);
kfree(drvr->proto->pd);
struct brcmf_bcdc *bcdc = drvr->proto->pd;
drvr->proto->pd = NULL;
brcmf_fws_detach(bcdc->fws);
kfree(bcdc);
}

View File

@ -22,6 +22,7 @@ void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
bool success);
struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
#else
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}

View File

@ -3214,7 +3214,7 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
{
struct ieee80211_channel *chan;
enum nl80211_band band;
int freq;
int freq, i;
if (channel <= CH_MAX_2G_CHANNEL)
band = NL80211_BAND_2GHZ;
@ -3229,10 +3229,22 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
if (!chan)
return -EINVAL;
req->channels[req->n_channels++] = chan;
memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
req->ssids[req->n_ssids++].ssid_len = ssid_len;
for (i = 0; i < req->n_channels; i++) {
if (req->channels[i] == chan)
break;
}
if (i == req->n_channels)
req->channels[req->n_channels++] = chan;
for (i = 0; i < req->n_ssids; i++) {
if (req->ssids[i].ssid_len == ssid_len &&
!memcmp(req->ssids[i].ssid, ssid, ssid_len))
break;
}
if (i == req->n_ssids) {
memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
req->ssids[req->n_ssids++].ssid_len = ssid_len;
}
return 0;
}
@ -3298,6 +3310,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
struct brcmf_pno_scanresults_le *pfn_result;
u32 result_count;
u32 status;
u32 datalen;
brcmf_dbg(SCAN, "Enter\n");
@ -3324,6 +3337,14 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
goto out_err;
}
netinfo_start = brcmf_get_netinfo_array(pfn_result);
datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
if (datalen < result_count * sizeof(*netinfo)) {
brcmf_err("insufficient event data\n");
goto out_err;
}
request = brcmf_alloc_internal_escan_request(wiphy,
result_count);
if (!request) {
@ -3331,17 +3352,11 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
goto out_err;
}
netinfo_start = brcmf_get_netinfo_array(pfn_result);
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
if (!netinfo) {
brcmf_err("Invalid netinfo ptr. index: %d\n",
i);
err = -EINVAL;
goto out_err;
}
if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
netinfo->SSID, netinfo->channel);
err = brcmf_internal_escan_add_info(request,

View File

@ -127,8 +127,6 @@ struct brcmf_pub {
struct brcmf_fweh_info fweh;
struct brcmf_fws_info *fws;
struct brcmf_ampdu_rx_reorder
*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];

View File

@ -36,6 +36,7 @@
#include "p2p.h"
#include "cfg80211.h"
#include "proto.h"
#include "bcdc.h"
#include "common.h"
/**
@ -1586,7 +1587,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
int i;
u8 *credits = data;
@ -1617,7 +1618,7 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
if (fws) {
brcmf_fws_lock(fws);
@ -1826,7 +1827,7 @@ netif_rx:
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
{
struct brcmf_skb_reorder_data *rd;
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
u8 *signal_data;
s16 data_len;
u8 type;
@ -2091,8 +2092,7 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_fws_info *fws = drvr->fws;
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct ethhdr *eh = (struct ethhdr *)(skb->data);
int fifo = BRCMF_FWS_FIFO_BCMC;
@ -2142,10 +2142,10 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
void brcmf_fws_add_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
struct brcmf_fws_mac_descriptor *entry;
if (!ifp->ndev)
if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
return;
entry = &fws->desc.iface[ifp->ifidx];
@ -2160,16 +2160,17 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
if (!entry)
return;
brcmf_fws_lock(ifp->drvr->fws);
brcmf_fws_lock(fws);
ifp->fws_desc = NULL;
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
brcmf_fws_macdesc_deinit(entry);
brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
brcmf_fws_unlock(ifp->drvr->fws);
brcmf_fws_cleanup(fws, ifp->ifidx);
brcmf_fws_unlock(fws);
}
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
@ -2243,7 +2244,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
{
struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats;
struct brcmf_fws_stats *fwstats = &(drvr_to_fws(bus_if->drvr)->stats);
seq_printf(seq,
"header_pulls: %u\n"
@ -2308,7 +2309,7 @@ static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
}
#endif
int brcmf_fws_init(struct brcmf_pub *drvr)
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
{
struct brcmf_fws_info *fws;
struct brcmf_if *ifp;
@ -2316,17 +2317,15 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
int rc;
u32 mode;
drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
if (!drvr->fws) {
fws = kzalloc(sizeof(*fws), GFP_KERNEL);
if (!fws) {
rc = -ENOMEM;
goto fail;
}
fws = drvr->fws;
spin_lock_init(&fws->spinlock);
/* set linkage back */
/* store drvr reference */
fws->drvr = drvr;
fws->fcmode = drvr->settings->fcmode;
@ -2334,7 +2333,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
(fws->fcmode == BRCMF_FWS_FCMODE_NONE)) {
fws->avoid_queueing = true;
brcmf_dbg(INFO, "FWS queueing will be avoided\n");
return 0;
return fws;
}
fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
@ -2396,6 +2395,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
brcmf_fws_hanger_init(&fws->hanger);
brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
brcmf_dbg(INFO, "added %s\n", fws->desc.other.name);
brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
@ -2405,27 +2405,24 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
fws->fw_signals ? "enabled" : "disabled", tlv);
return 0;
return fws;
fail:
brcmf_fws_deinit(drvr);
return rc;
brcmf_fws_detach(fws);
return ERR_PTR(rc);
}
void brcmf_fws_deinit(struct brcmf_pub *drvr)
void brcmf_fws_detach(struct brcmf_fws_info *fws)
{
struct brcmf_fws_info *fws = drvr->fws;
if (!fws)
return;
if (drvr->fws->fws_wq)
destroy_workqueue(drvr->fws->fws_wq);
if (fws->fws_wq)
destroy_workqueue(fws->fws_wq);
/* cleanup */
brcmf_fws_lock(fws);
brcmf_fws_cleanup(fws, -1);
drvr->fws = NULL;
brcmf_fws_unlock(fws);
/* free top structure */
@ -2461,7 +2458,7 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
{
struct brcmf_fws_info *fws = drvr->fws;
struct brcmf_fws_info *fws = drvr_to_fws(drvr);
struct brcmf_if *ifp;
int i;

View File

@ -18,8 +18,8 @@
#ifndef FWSIGNAL_H_
#define FWSIGNAL_H_
int brcmf_fws_init(struct brcmf_pub *drvr);
void brcmf_fws_deinit(struct brcmf_pub *drvr);
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
void brcmf_fws_detach(struct brcmf_fws_info *fws);
bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);

View File

@ -540,7 +540,11 @@ static int qcount[NUMPRIO];
/* Limit on rounding up frames */
static const uint max_roundup = 512;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define ALIGNMENT 8
#else
#define ALIGNMENT 4
#endif
enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_NORMAL,

View File

@ -7,6 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
iwlwifi-objs += iwl-trans.o

View File

@ -73,8 +73,8 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
#define IWL7265D_UCODE_API_MAX 28
#define IWL3168_UCODE_API_MAX 28
#define IWL7265D_UCODE_API_MAX 30
#define IWL3168_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 17

View File

@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 28
#define IWL8265_UCODE_API_MAX 28
#define IWL8000_UCODE_API_MAX 30
#define IWL8265_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 17

View File

@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2016 Intel Deutschland GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -18,7 +18,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2015-2016 Intel Deutschland GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -55,10 +55,10 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 28
#define IWL9000_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 17
#define IWL9000_UCODE_API_MIN 30
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
@ -73,14 +73,14 @@
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9000LC_MODULE_FIRMWARE(api) \
IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@ -148,7 +148,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
.fw_name_pre = IWL9260_FW_PRE,
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@ -158,7 +159,8 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260_FW_PRE,
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@ -168,7 +170,8 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260_FW_PRE,
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@ -198,21 +201,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.integrated = true,
};
/*
* TODO the struct below is for internal testing only this should be
* removed by EO 2016~
*/
const struct iwl_cfg iwl9000lc_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9000",
.fw_name_pre = IWL9000LC_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));

View File

@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2016 Intel Deutschland GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -18,7 +18,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2015-2016 Intel Deutschland GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL_A000_UCODE_API_MAX 28
#define IWL_A000_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
@ -65,15 +65,16 @@
#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
#define IWL_A000_DCCM_OFFSET 0x800000
#define IWL_A000_DCCM_LEN 0x18000
#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */
#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */
#define IWL_A000_DCCM2_OFFSET 0x880000
#define IWL_A000_DCCM2_LEN 0x8000
#define IWL_A000_SMEM_OFFSET 0x400000
#define IWL_A000_SMEM_LEN 0x68000
#define IWL_A000_SMEM_LEN 0xD0000
#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-a0-hrcdb-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
@ -121,7 +122,8 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.use_tfh = true, \
.rf_id = true
.rf_id = true, \
.gen2 = true
const struct iwl_cfg iwla000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC a000",
@ -133,6 +135,17 @@ const struct iwl_cfg iwla000_2ac_cfg_hr = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
.name = "Intel(R) Dual Band Wireless AC a000",
.fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
IWL_DEVICE_A000,
.ht_params = &iwl_a000_ht_params,
.nvm_ver = IWL_A000_NVM_VERSION,
.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.cdb = true,
};
const struct iwl_cfg iwla000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC a000",
.fw_name_pre = IWL_A000_JF_FW_PRE,

View File

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 Intel Deutschland GmbH
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 Intel Deutschland GmbH
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -90,16 +90,6 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
};
static inline bool iwl_has_secure_boot(u32 hw_rev,
enum iwl_device_family family)
{
/* return 1 only for family 8000 B0 */
if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
return true;
return false;
}
/*
* LED mode
* IWL_LED_DEFAULT: use device default
@ -283,6 +273,8 @@ struct iwl_pwr_tx_backoff {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @fw_name_pre_next_step: same as @fw_name_pre, only for next step
* (if supported)
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
@ -321,6 +313,8 @@ struct iwl_pwr_tx_backoff {
* @vht_mu_mimo_supported: VHT MU-MIMO support
* @rf_id: need to read rf_id to determine the firmware image
* @integrated: discrete or integrated
* @gen2: a000 and on transport operation
* @cdb: CDB support
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@ -330,6 +324,7 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
const char *fw_name_pre_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
@ -365,7 +360,9 @@ struct iwl_cfg {
vht_mu_mimo_supported:1,
rf_id:1,
integrated:1,
use_tfh:1;
use_tfh:1,
gen2:1,
cdb:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@ -449,13 +446,13 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9000lc_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
#endif /* CONFIG_IWLMVM */

View File

@ -0,0 +1,203 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_context_info_file_h__
#define __iwl_context_info_file_h__
/* maximum number of DRAM map entries supported by FW */
#define IWL_MAX_DRAM_ENTRY 64
#define CSR_CTXT_INFO_BA 0x40
/**
* enum iwl_context_info_flags - Context information control flags
* @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
* the init done for driver command that configures several system modes
* @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
* @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
* @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
* @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
* exponent, the actual size is 2**value, valid sizes are 8-2048.
* The value is four bits long. Maximum valid exponent is 12
* @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
* default is short format - not supported by the driver)
*/
enum iwl_context_info_flags {
IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
IWL_CTXT_INFO_RB_SIZE_4K = BIT(3),
IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
};
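Putting the layout together: a control word selecting long TFDs and a 512-entry RBD circular buffer could be composed as below (illustrative only; 512 = 2^9, and the exponent sits in the 4-bit field starting at bit 4):

u32 control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
		    (9 << IWL_CTXT_INFO_RB_CB_SIZE_POS); /* 2^9 = 512 RBDs */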
/*
* struct iwl_context_info_version - version structure
* @mac_id: SKU and revision id
* @version: context information version id
* @size: the size of the context information in DWs
*/
struct iwl_context_info_version {
__le16 mac_id;
__le16 version;
__le16 size;
__le16 reserved;
} __packed;
/*
* struct iwl_context_info_control - control structure
* @control_flags: context information flags see &enum iwl_context_info_flags
*/
struct iwl_context_info_control {
__le32 control_flags;
__le32 reserved;
} __packed;
/*
* struct iwl_context_info_dram - images DRAM map
* each entry in the map represents a DRAM chunk of up to 32 KB
* @umac_img: UMAC image DRAM map
* @lmac_img: LMAC image DRAM map
* @virtual_img: paged image DRAM map
*/
struct iwl_context_info_dram {
__le64 umac_img[IWL_MAX_DRAM_ENTRY];
__le64 lmac_img[IWL_MAX_DRAM_ENTRY];
__le64 virtual_img[IWL_MAX_DRAM_ENTRY];
} __packed;
/*
* struct iwl_context_info_rbd_cfg - RBDs configuration
* @free_rbd_addr: default queue free RB CB base address
* @used_rbd_addr: default queue used RB CB base address
* @status_wr_ptr: default queue used RB status write pointer
*/
struct iwl_context_info_rbd_cfg {
__le64 free_rbd_addr;
__le64 used_rbd_addr;
__le64 status_wr_ptr;
} __packed;
/*
* struct iwl_context_info_hcmd_cfg - command queue configuration
* @cmd_queue_addr: address of command queue
* @cmd_queue_size: number of entries
*/
struct iwl_context_info_hcmd_cfg {
__le64 cmd_queue_addr;
u8 cmd_queue_size;
u8 reserved[7];
} __packed;
/*
* struct iwl_context_info_dump_cfg - Core Dump configuration
* @core_dump_addr: core dump (debug DRAM address) start address
* @core_dump_size: size, in DWs
*/
struct iwl_context_info_dump_cfg {
__le64 core_dump_addr;
__le32 core_dump_size;
__le32 reserved;
} __packed;
/*
* struct iwl_context_info_pnvm_cfg - platform NVM data configuration
* @platform_nvm_addr: Platform NVM data start address
* @platform_nvm_size: size in DWs
*/
struct iwl_context_info_pnvm_cfg {
__le64 platform_nvm_addr;
__le32 platform_nvm_size;
__le32 reserved;
} __packed;
/*
* struct iwl_context_info_early_dbg_cfg - early debug configuration for
* dumping DRAM addresses
* @early_debug_addr: early debug start address
* @early_debug_size: size in DWs
*/
struct iwl_context_info_early_dbg_cfg {
__le64 early_debug_addr;
__le32 early_debug_size;
__le32 reserved;
} __packed;
/*
* struct iwl_context_info - device INIT configuration
* @version: version information of context info and HW
* @control: control flags of FH configurations
* @rbd_cfg: default RX queue configuration
* @hcmd_cfg: command queue configuration
* @dump_cfg: core dump data
* @edbg_cfg: early debug configuration
* @pnvm_cfg: platform nvm configuration
* @dram: firmware image addresses in DRAM
*/
struct iwl_context_info {
struct iwl_context_info_version version;
struct iwl_context_info_control control;
__le64 reserved0;
struct iwl_context_info_rbd_cfg rbd_cfg;
struct iwl_context_info_hcmd_cfg hcmd_cfg;
__le32 reserved1[4];
struct iwl_context_info_dump_cfg dump_cfg;
struct iwl_context_info_early_dbg_cfg edbg_cfg;
struct iwl_context_info_pnvm_cfg pnvm_cfg;
__le32 reserved2[16];
struct iwl_context_info_dram dram;
__le32 reserved3[16];
} __packed;
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
#endif /* __iwl_context_info_file_h__ */

View File

@ -348,7 +348,6 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
#define CSR_HW_RF_ID_TYPE_HR (0x00109000)
/* EEPROM REG */

View File

@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -211,24 +211,46 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const char *name_pre = drv->trans->cfg->fw_name_pre;
const struct iwl_cfg *cfg = drv->trans->cfg;
char tag[8];
const char *fw_pre_name;
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
fw_pre_name = cfg->fw_name_pre_next_step;
else
fw_pre_name = cfg->fw_name_pre;
if (first) {
drv->fw_index = drv->trans->cfg->ucode_api_max;
drv->fw_index = cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
drv->fw_index--;
sprintf(tag, "%d", drv->fw_index);
}
if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
if (drv->fw_index < cfg->ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
if (cfg->ucode_api_min == cfg->ucode_api_max) {
IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
cfg->ucode_api_max);
} else {
IWL_ERR(drv, "minimum version required: %s%d\n",
fw_pre_name,
cfg->ucode_api_min);
IWL_ERR(drv, "maximum version supported: %s%d\n",
fw_pre_name,
cfg->ucode_api_max);
}
IWL_ERR(drv,
"check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
return -ENOENT;
}
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
name_pre, tag);
fw_pre_name, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
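For example, with IWL8265_UCODE_API_MAX now 30 and IWL8000_UCODE_API_MIN still 17, the countdown above would attempt (a sketch; prefix per iwl-8000.c):

/* iwlwifi-8265-30.ucode
 * iwlwifi-8265-29.ucode
 * ...
 * iwlwifi-8265-17.ucode  -> below this: "no suitable firmware found!"
 */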

View File

@ -614,6 +614,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
IWL_MAX_RX_HW_QUEUES * \
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */
#define RX_QUEUE_CB_SIZE(x) ilog2(x)
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK 255
@ -639,6 +641,8 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
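The two encodings differ only in the bias, e.g. (worked out from the macros above):

/* RX_QUEUE_CB_SIZE(512)  == ilog2(512)     == 9
 * TFD_QUEUE_CB_SIZE(256) == ilog2(256) - 3 == 5  (maximum: 256 TFDs)
 * TFD_QUEUE_CB_SIZE(8)   == ilog2(8)   - 3 == 0  (minimum: 8 TFDs)
 */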
@ -647,7 +651,7 @@ struct iwl_rb_status {
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
}
/**
* struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor

View File

@ -241,6 +241,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* iteration complete notification, and the timestamp reported for RX
* received during scan, are reported in TSF of the mac specified in the
* scan request.
* @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
* ADD_MODIFY_STA_KEY_API_S_VER_2.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@ -250,6 +252,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@ -344,6 +347,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,

View File

@ -54,8 +54,8 @@ IWL_EXPORT_SYMBOL(iwl_write32);
void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
{
trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
iwl_trans_write32(trans, ofs, val & 0xffffffff);
iwl_trans_write32(trans, ofs + 4, val >> 32);
iwl_trans_write32(trans, ofs, lower_32_bits(val));
iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
}
IWL_EXPORT_SYMBOL(iwl_write64);
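Behavior is unchanged here; lower_32_bits()/upper_32_bits() simply replace the open-coded mask and shifts. A worked example:

/* iwl_write64(trans, ofs, 0x1122334455667788ULL) issues:
 *   iwl_trans_write32(trans, ofs,     0x55667788);  (lower_32_bits)
 *   iwl_trans_write32(trans, ofs + 4, 0x11223344);  (upper_32_bits)
 */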

View File

@ -76,8 +76,8 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
}
IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt)
bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt)
{
bool triggered = false;
@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
}
}
spin_unlock(&notif_wait->notif_wait_lock);
}
if (triggered)
wake_up_all(&notif_wait->notif_waitq);
return triggered;
}
IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
IWL_EXPORT_SYMBOL(iwl_notification_wait);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{

View File

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -89,10 +90,10 @@ struct iwl_notif_wait_data {
*
* This structure is not used directly, to wait for a
* notification declare it on the stack, and call
* iwlagn_init_notification_wait() with appropriate
* iwl_init_notification_wait() with appropriate
* parameters. Then do whatever will cause the ucode
* to notify the driver, and to wait for that then
* call iwlagn_wait_notification().
* call iwl_wait_notification().
*
* Each notification is one-shot. If at some point we
* need to support multi-shot notifications (which
@ -114,10 +115,24 @@ struct iwl_notification_wait {
/* caller functions */
void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
struct iwl_rx_packet *pkt);
bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
struct iwl_rx_packet *pkt);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
static inline void
iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
{
wake_up_all(&notif_data->notif_waitq);
}
static inline void
iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
struct iwl_rx_packet *pkt)
{
if (iwl_notification_wait(notif_data, pkt))
iwl_notification_notify(notif_data);
}
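Per the comment above, the waiter's side ends up looking roughly like this (a hedged sketch; HYPOTHETICAL_NOTIF_ID and the HZ timeout are placeholders, and notif_data is the opmode's struct iwl_notif_wait_data):

struct iwl_notification_wait wait;
static const u16 wanted[] = { HYPOTHETICAL_NOTIF_ID };

iwl_init_notification_wait(&notif_data, &wait, wanted,
			   ARRAY_SIZE(wanted), NULL, NULL);
/* ... send the command that makes the ucode notify ... */
ret = iwl_wait_notification(&notif_data, &wait, HZ);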
/* user functions */
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,

View File

@ -294,9 +294,6 @@
/*********************** END TX SCHEDULER *************************************/
/* tcp checksum offload */
#define RX_EN_CSUM (0x00a00d88)
/* Oscillator clock */
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
@ -309,6 +306,7 @@
* Note this address is cleared after MAC reset.
*/
#define UREG_UCODE_LOAD_STATUS (0xa05c40)
#define UREG_CPU_INIT_RUN (0xa05c44)
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
@ -316,6 +314,8 @@
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
#define LMAC2_PRPH_OFFSET (0x100000)
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
#define RXF_RD_D_SPACE (0xa00c40)
@ -378,6 +378,7 @@
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
#define WFPM_GP2 0xA030B4
enum {
ENABLE_WFPM = BIT(31),
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
@ -398,6 +399,8 @@ enum aux_misc_master1_en {
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
#define UMAG_SB_CPU_1_STATUS 0xA038C0
#define UMAG_SB_CPU_2_STATUS 0xA038C4
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8

View File

@ -70,8 +70,7 @@
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
const struct iwl_trans_ops *ops,
size_t dev_cmd_headroom)
const struct iwl_trans_ops *ops)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@ -90,15 +89,13 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->dev = dev;
trans->cfg = cfg;
trans->ops = ops;
trans->dev_cmd_headroom = dev_cmd_headroom;
trans->num_rx_queues = 1;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
sizeof(struct iwl_device_cmd)
+ trans->dev_cmd_headroom,
sizeof(struct iwl_device_cmd),
sizeof(void *),
SLAB_HWCACHE_ALIGN,
NULL);

View File

@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -397,6 +397,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
*/
#define IWL_MAX_HW_QUEUES 32
#define IWL_MAX_TID_COUNT 8
#define IWL_MGMT_TID 15
#define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
@ -530,6 +531,44 @@ struct iwl_trans_txq_scd_cfg {
int frame_limit;
};
/* Available options for &struct iwl_tx_queue_cfg_cmd */
enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
* @tid: tid of the queue
* @flags: bit 0: on - enable queue, off - disable; bit 1 - short TFD format
* @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
* Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
* @byte_cnt_addr: address of byte count table
* @tfdq_addr: address of TFD circular buffer
*/
struct iwl_tx_queue_cfg_cmd {
u8 sta_id;
u8 tid;
__le16 flags;
__le32 cb_size;
__le64 byte_cnt_addr;
__le64 tfdq_addr;
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
/**
* struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
* @queue_number: queue number assigned to this RA/TID
* @flags: set on failure
* @write_pointer: initial value for write pointer
*/
struct iwl_tx_queue_cfg_rsp {
__le16 queue_number;
__le16 flags;
__le16 write_pointer;
__le16 reserved;
} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
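Tied to the new transport op below, an opmode would allocate a queue roughly as follows (a sketch only; sta_id, tid, cmd_id, wdg_timeout and the two DMA addresses are placeholders):

struct iwl_tx_queue_cfg_cmd cmd = {
	.sta_id = sta_id,
	.tid = tid,
	.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
	.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(256)), /* 256 TFDs -> 5 */
	.byte_cnt_addr = cpu_to_le64(bc_tbl_dma),
	.tfdq_addr = cpu_to_le64(tfd_ring_dma),
};
int queue = iwl_trans_txq_alloc(trans, &cmd, cmd_id, wdg_timeout);
if (queue < 0)
	return queue;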
/**
* struct iwl_trans_ops - transport specific operations
*
@ -640,12 +679,16 @@ struct iwl_trans_ops {
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
/* a000 functions */
int (*txq_alloc)(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
int cmd_id,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
@ -774,9 +817,6 @@ enum iwl_plat_pm_mode {
* the transport must set this before calling iwl_drv_start()
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
* @dev_cmd_headroom: room needed for the transport's private use before the
* device_cmd for Tx - for internal use only
* The user should use iwl_trans_{alloc,free}_tx_cmd.
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
@ -827,7 +867,6 @@ struct iwl_trans {
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
size_t dev_cmd_headroom;
char dev_cmd_pool_name[50];
struct dentry *dbgfs_dir;
@ -1000,13 +1039,13 @@ iwl_trans_dump_data(struct iwl_trans *trans,
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
struct iwl_device_cmd *dev_cmd_ptr =
kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
if (unlikely(dev_cmd_ptr == NULL))
return NULL;
return (struct iwl_device_cmd *)
(dev_cmd_ptr + trans->dev_cmd_headroom);
return dev_cmd_ptr;
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@ -1014,9 +1053,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
struct iwl_device_cmd *dev_cmd)
{
u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
@ -1065,6 +1102,34 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
if (WARN_ON_ONCE(!trans->ops->txq_free))
return;
trans->ops->txq_free(trans, queue);
}
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
int cmd_id,
unsigned int queue_wdg_timeout)
{
might_sleep();
if (WARN_ON_ONCE(!trans->ops->txq_alloc))
return -ENOTSUPP;
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
int queue, bool shared_mode)
{
@ -1072,15 +1137,6 @@ static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
int queue)
{
/* we should never be called if the trans doesn't support it */
BUG_ON(!trans->ops->get_txq_byte_table);
return trans->ops->get_txq_byte_table(trans, queue);
}
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn,
@ -1248,8 +1304,7 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
const struct iwl_trans_ops *ops,
size_t dev_cmd_headroom);
const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************

View File

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -82,6 +84,19 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
int i, ret;
u32 status;
int size;
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
size = sizeof(cmd);
if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
!iwl_mvm_is_cdb_supported(mvm))
cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
else
cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
} else {
size = IWL_BINDING_CMD_SIZE_V1;
}
memset(&cmd, 0, sizeof(cmd));
@ -99,7 +114,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
sizeof(cmd), &cmd, &status);
size, &cmd, &status);
if (ret) {
IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
action, ret);

View File

@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Rssi update while not associated - can happen since the statistics
* are handled asynchronously
*/
if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
return;
/* No BT - reports should be disabled */

View File

@ -665,6 +665,19 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_binding_cmd binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
u32 status;
int size;
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
size = sizeof(binding_cmd);
if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
!iwl_mvm_is_cdb_supported(mvm))
binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
else
binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
} else {
size = IWL_BINDING_CMD_SIZE_V1;
}
/* add back the PHY */
if (WARN_ON(!mvmvif->phy_ctxt))
@ -711,8 +724,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
sizeof(binding_cmd), &binding_cmd,
&status);
size, &binding_cmd, &status);
if (ret) {
IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
return ret;
@ -986,7 +998,9 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
goto out;
}
if (key_data.use_tkip) {
if (key_data.use_tkip &&
!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_TKIP_PARAM,
cmd_flags, sizeof(tkip_cmd),
@ -1194,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@ -2102,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
*/
iwl_mvm_update_changed_regdom(mvm);
if (!unified_image)
/* Re-configure default SAR profile */
iwl_mvm_sar_select_profile(mvm, 1, 1);
if (mvm->net_detect) {
/* If this is a non-unified image, we restart the FW,
* so no need to stop the netdetect scan. If that

View File

@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
mvmvif->queue_params[i].uapsd);
if (vif->type == NL80211_IFTYPE_STATION &&
ap_sta_id != IWL_MVM_STATION_COUNT) {
ap_sta_id != IWL_MVM_INVALID_STA) {
struct iwl_mvm_sta *mvm_sta;
mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);

View File

@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));

View File

@ -73,7 +73,9 @@
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_INVALID_STA 0xFF
#define IWL_MVM_TDLS_STA_COUNT 4
enum iwl_ac {

View File

@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd {
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
#define IWL_NUM_GEO_PROFILES 3
/**
* enum iwl_geo_per_chain_offset_operation - type of operation
* @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
* @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
*/
enum iwl_geo_per_chain_offset_operation {
IWL_PER_CHAIN_OFFSET_SET_TABLES,
IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */
/**
* struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
* @max_tx_power: maximum allowed tx power.
* @chain_a: tx power offset for chain a.
* @chain_b: tx power offset for chain b.
*/
struct iwl_per_chain_offset {
__le16 max_tx_power;
u8 chain_a;
u8 chain_b;
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
struct iwl_per_chain_offset_group {
struct iwl_per_chain_offset lb;
struct iwl_per_chain_offset hb;
} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
/**
* struct iwl_geo_tx_power_profiles_cmd - struct for GEO_TX_POWER_LIMIT cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
*/
struct iwl_geo_tx_power_profiles_cmd {
__le32 ops;
struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
} __packed; /* GEO_TX_POWER_LIMIT */
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)


@ -516,7 +516,7 @@ struct iwl_scan_dwell {
* scan_config_channel_flag
* @channel_array: default supported channels
*/
struct iwl_scan_config {
struct iwl_scan_config_v1 {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@ -532,7 +532,7 @@ struct iwl_scan_config {
#define SCAN_TWO_LMACS 2
struct iwl_scan_config_cdb {
struct iwl_scan_config {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@ -669,7 +669,7 @@ struct iwl_scan_req_umac {
u8 n_channels;
__le16 reserved;
u8 data[];
} no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
@ -679,13 +679,13 @@ struct iwl_scan_req_umac {
u8 n_channels;
__le16 reserved;
u8 data[];
} cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
};
} __packed;
#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
2 * sizeof(__le32))
#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
2 * sizeof(__le32))
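
With the union members renamed, the command size follows the firmware API.
A minimal sketch of the size selection (the use_v6 flag is an assumption
standing in for the driver's real capability check):

static inline size_t iwl_scan_req_umac_size_sketch(bool use_v6)
{
	/* v1 carries single-LMAC timing fields and is therefore two
	 * __le32 smaller than the v6 layout
	 */
	return use_v6 ? IWL_SCAN_REQ_UMAC_SIZE : IWL_SCAN_REQ_UMAC_SIZE_V1;
}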
/**
* struct iwl_umac_scan_abort


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
* enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
* @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
* @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
* @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
* @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
* @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
@ -351,10 +351,12 @@ struct iwl_mvm_add_sta_cmd_v7 {
* @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
* mac-addr.
* @beamform_flags: beam forming controls
* @tfd_queue_msk: tfd queues used by this station
* @tfd_queue_msk: tfd queues used by this station.
* Obsolete for new TX API (9 and above).
* @rx_ba_window: aggregation window size
* @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means
* that the queues used by this station are in the first 32.
* @sp_length: the size of the SP as it appears in the WME IE
* @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
* enabled ACs.
*
* The device contains an internal table of per-station information, with info
* on security keys, aggregation parameters, and Tx rates for initial Tx
@ -384,32 +386,54 @@ struct iwl_mvm_add_sta_cmd {
__le16 beamform_flags;
__le32 tfd_queue_msk;
__le16 rx_ba_window;
u8 scd_queue_bank;
u8 uapsd_trigger_acs;
} __packed; /* ADD_STA_CMD_API_S_VER_8 */
u8 sp_length;
u8 uapsd_acs;
} __packed; /* ADD_STA_CMD_API_S_VER_9 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
* struct iwl_mvm_add_sta_key_common - add/modify sta key common part
* ( REPLY_ADD_STA_KEY = 0x17 )
* @sta_id: index of station in uCode's station table
* @key_offset: key offset in key storage
* @key_flags: type %iwl_sta_key_flag
* @key: key material data
* @rx_secur_seq_cnt: RX security sequence counter for the key
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
*/
struct iwl_mvm_add_sta_key_cmd {
struct iwl_mvm_add_sta_key_common {
u8 sta_id;
u8 key_offset;
__le16 key_flags;
u8 key[32];
u8 rx_secur_seq_cnt[16];
} __packed;
/**
* struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key
* @common: see &struct iwl_mvm_add_sta_key_common
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
*/
struct iwl_mvm_add_sta_key_cmd_v1 {
struct iwl_mvm_add_sta_key_common common;
u8 tkip_rx_tsc_byte2;
u8 reserved;
__le16 tkip_rx_ttak[5];
} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
* @common: see &struct iwl_mvm_add_sta_key_common
* @rx_mic_key: TKIP RX unicast or multicast key
* @tx_mic_key: TKIP TX key
* @transmit_seq_cnt: TSC, transmit packet number
*/
struct iwl_mvm_add_sta_key_cmd {
struct iwl_mvm_add_sta_key_common common;
__le64 rx_mic_key;
__le64 tx_mic_key;
__le64 transmit_seq_cnt;
} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
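
Because both command versions begin with the same common block, a sender
only needs to pick the right size. A sketch under that assumption (new_api
stands in for the driver's API-version check):

static int iwl_mvm_send_sta_key_sketch(struct iwl_mvm *mvm,
				       struct iwl_mvm_add_sta_key_common *common,
				       bool new_api)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 v1;
		struct iwl_mvm_add_sta_key_cmd v2;
	} u = {};
	int size = new_api ? sizeof(u.v2) : sizeof(u.v1);

	u.v1.common = *common;	/* the common part sits first in both */
	return iwl_mvm_send_cmd_pdu(mvm, REPLY_ADD_STA_KEY, 0, size, &u);
}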
/**
* enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
* @ADD_STA_SUCCESS: operation was executed successfully


@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -123,6 +124,20 @@ enum iwl_tx_flags {
TX_CMD_FLG_HCCA_CHUNK = BIT(31)
}; /* TX_FLAGS_BITS_API_S_VER_1 */
/**
* enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
* @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
* @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
* to a secured STA
* @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
* selection, retry limits and BT kill
*/
enum iwl_tx_cmd_flags {
IWL_TX_FLAGS_CMD_RATE = BIT(0),
IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1),
IWL_TX_FLAGS_HIGH_PRI = BIT(2),
}; /* TX_FLAGS_BITS_API_S_VER_3 */
/**
* enum iwl_tx_pm_timeouts - pm timeout values in TX command
* @PM_FRAME_NONE: no need to suspend sleep mode
@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl {
TX_CMD_SEC_EXT = 0x04,
TX_CMD_SEC_GCMP = 0x05,
TX_CMD_SEC_KEY128 = 0x08,
TX_CMD_SEC_KEY_FROM_TABLE = 0x08,
TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
};
/* TODO: how are these values OK with only a 16-bit variable??? */
@ -301,6 +316,31 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_6 */
struct iwl_dram_sec_info {
__le32 pn_low;
__le16 pn_high;
__le16 aux_info;
} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
/**
* struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
* ( TX_CMD = 0x1c )
* @len: length in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
* @flags: combination of &enum iwl_tx_cmd_flags
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
*/
struct iwl_tx_cmd_gen2 {
__le16 len;
__le16 offload_assist;
__le32 flags;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_7 */
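
For illustration, a sketch of how the new flags might be applied when
building a gen2 TX command (hypothetical helper; the real fill logic lives
in the driver's TX path):

static void iwl_mvm_tx_gen2_flags_sketch(struct iwl_tx_cmd_gen2 *tx_cmd,
					 bool is_eapol, bool fixed_rate)
{
	u32 flags = 0;

	if (fixed_rate)		/* honor rate_n_flags over the STA rate */
		flags |= IWL_TX_FLAGS_CMD_RATE;
	if (is_eapol)		/* may affect rate, retries and BT kill */
		flags |= IWL_TX_FLAGS_HIGH_PRI;

	tx_cmd->flags = cpu_to_le32(flags);
}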
/*
* TX response related data
*/
@ -508,9 +548,11 @@ struct agg_tx_status {
* @tlc_info: TLC rate info
* @ra_tid: bits [3:0] = ra, bits [7:4] = tid
* @frame_ctrl: frame control
* @tx_queue: TX queue for this response
* @status: for non-agg: frame status TX_STATUS_*
* for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
* follow this one, up to frame_count.
* For version 6, a TX response isn't received for aggregation at all.
*
* After the array of statuses comes the SSN of the SCD. Look at
* %iwl_mvm_get_scd_ssn for more details.
@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp {
u8 tlc_info;
u8 ra_tid;
__le16 frame_ctrl;
struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_3 */
union {
struct {
struct agg_tx_status status;
} v3; /* TX_RSP_API_S_VER_3 */
struct {
__le16 tx_queue;
__le16 reserved2;
struct agg_tx_status status;
} v6;
};
} __packed; /* TX_RSP_API_S_VER_6 */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif {
* struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
* @q_num: TFD queue number
* @tfd_index: Index of first un-acked frame in the TFD queue
* @scd_queue: For debug only - the physical queue the TFD queue is bound to
*/
struct iwl_mvm_compressed_ba_tfd {
u8 q_num;
u8 reserved;
__le16 q_num;
__le16 tfd_index;
u8 scd_queue;
u8 reserved;
__le16 reserved2;
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
/**
@ -635,6 +688,10 @@ enum iwl_mvm_ba_resp_flags {
* @tx_rate: the rate the aggregation was sent at
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
* @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
* for details.
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
* &iwl_mvm_compressed_ba_ratid for more details.
*/
struct iwl_mvm_compressed_ba_notif {
__le32 flags;
@ -646,6 +703,7 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
__le16 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
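
A sketch of consuming the notification, assuming the trailing ba_tfd[]
array described in the kernel-doc above (its declaration falls outside
this hunk's context):

static void iwl_mvm_walk_ba_tfds_sketch(struct iwl_mvm *mvm,
					struct iwl_mvm_compressed_ba_notif *notif)
{
	u16 i;

	for (i = 0; i < le16_to_cpu(notif->tfd_cnt); i++) {
		struct iwl_mvm_compressed_ba_tfd *ba_tfd = &notif->ba_tfd[i];

		/* frames on q_num up to tfd_index may be reclaimed */
		IWL_DEBUG_TX_REPLY(mvm, "BA: queue %d, first unacked %d\n",
				   le16_to_cpu(ba_tfd->q_num),
				   le16_to_cpu(ba_tfd->tfd_index));
	}
}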
@ -754,25 +812,6 @@ struct iwl_tx_path_flush_cmd {
__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
/**
* iwl_mvm_get_scd_ssn - returns the SSN of the SCD
* @tx_resp: the Tx response from the fw (agg or non-agg)
*
* When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
* it can't know that everything will go well until the end of the AMPDU, it
* can't know in advance the number of MPDUs that will be sent in the current
* batch. This is why it writes the agg Tx response while it fetches the MPDUs.
* Hence, it can't know in advance what the SSN of the SCD will be at the end
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
{
return le32_to_cpup((__le32 *)&tx_resp->status +
tx_resp->frame_count) & 0xfff;
}
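
The helper is not gone for good: with the status field now inside a
version-dependent union, it has to be rebuilt on top of the new
iwl_mvm_get_agg_status() accessor added in mvm.h below. A sketch of the
relocated version:

static inline u32 iwl_mvm_get_scd_ssn_sketch(struct iwl_mvm *mvm,
					     struct iwl_mvm_tx_resp *tx_resp)
{
	/* same variable-offset trick as before, but the base of the
	 * status array now depends on the response version (v3 vs v6)
	 */
	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
			    tx_resp->frame_count) & 0xfff;
}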
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03,
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
GEO_TX_POWER_LIMIT = 0x05,
CT_KILL_NOTIFICATION = 0xFE,
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
enum iwl_system_subcmd_ids {
SHARED_MEM_CFG_CMD = 0x0,
INIT_EXTENDED_CFG_CMD = 0x03,
};
enum iwl_data_path_subcmd_ids {
@ -345,9 +347,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
NVM_ACCESS_COMPLETE = 0x0,
};
enum iwl_fmac_debug_cmds {
enum iwl_debug_cmds {
LMAC_RD_WR = 0x0,
UMAC_RD_WR = 0x1,
MFU_ASSERT_DUMP_NTF = 0xFE,
};
/* command groups */
@ -673,10 +676,8 @@ struct iwl_error_resp {
/* Common PHY, MAC and Bindings definitions */
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
#define AUX_BINDING_INDEX (3)
/* Used to extract ID and color from the context dword */
#define FW_CTXT_ID_POS (0)
@ -689,7 +690,7 @@ struct iwl_error_resp {
(_color << FW_CTXT_COLOR_POS))
/* Possible actions on PHYs, MACs and Bindings */
enum {
enum iwl_phy_ctxt_action {
FW_CTXT_ACTION_STUB = 0,
FW_CTXT_ACTION_ADD,
FW_CTXT_ACTION_MODIFY,
@ -960,6 +961,7 @@ struct iwl_time_event_notif {
* @action: action to perform, one of FW_CTXT_ACTION_*
* @macs: array of MAC id and colors which belong to the binding
* @phy: PHY id and color which belongs to the binding
* @lmac_id: the lmac id the binding belongs to
*/
struct iwl_binding_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@ -968,7 +970,13 @@ struct iwl_binding_cmd {
/* BINDING_DATA_API_S_VER_1 */
__le32 macs[MAX_MACS_IN_BINDING];
__le32 phy;
} __packed; /* BINDING_CMD_API_S_VER_1 */
/* BINDING_CMD_API_S_VER_1 */
__le32 lmac_id;
} __packed; /* BINDING_CMD_API_S_VER_2 */
#define IWL_BINDING_CMD_SIZE_V1 offsetof(struct iwl_binding_cmd, lmac_id)
#define IWL_LMAC_24G_INDEX 0
#define IWL_LMAC_5G_INDEX 1
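
The offsetof() define lets a single structure serve both API versions; a
sketch of the size selection a caller might do (cdb_fw is a stand-in for
the driver's actual capability check):

static inline int iwl_binding_cmd_size_sketch(bool cdb_fw)
{
	/* CDB firmware consumes the new lmac_id field; older firmware
	 * must only be sent the v1 prefix of the command
	 */
	return cdb_fw ? sizeof(struct iwl_binding_cmd) :
			IWL_BINDING_CMD_SIZE_V1;
}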
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
@ -990,6 +998,9 @@ struct iwl_time_quota_data {
* struct iwl_time_quota_cmd - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
* Note: on non-CDB the fourth one is the auxiliary mac and is
* essentially zero.
* On CDB the fourth one is a regular binding.
*/
struct iwl_time_quota_cmd {
struct iwl_time_quota_data quotas[MAX_BINDINGS];
@ -1230,6 +1241,25 @@ struct iwl_mfuart_load_notif {
__le32 image_size;
} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/
/**
* struct iwl_mfu_assert_dump_notif - mfuart dump logs
* ( MFU_ASSERT_DUMP_NTF = 0xfe )
* @assert_id: mfuart assert id that caused the notif
* @curr_reset_num: number of asserts since uptime
* @index_num: current chunk id
* @parts_num: total number of chunks
* @data_size: number of data bytes sent
* @data: data buffer
*/
struct iwl_mfu_assert_dump_notif {
__le32 assert_id;
__le32 curr_reset_num;
__le16 index_num;
__le16 parts_num;
__le32 data_size;
__le32 data[0];
} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/
/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e )
@ -1998,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 {
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
/**
* struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
*
* @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
* @txfifo_size: size of TX FIFOs
* @rxfifo1_addr: RXF1 addr
* @rxfifo1_size: RXF1 size
*/
struct iwl_shared_mem_lmac_cfg {
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM];
__le32 rxfifo1_addr;
__le32 rxfifo1_size;
} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
/**
* struct iwl_shared_mem_cfg - shared memory configuration information from the FW
*
* @shared_mem_addr: shared memory address
* @shared_mem_size: shared memory size
* @sample_buff_addr: internal sample (mon/adc) buff addr
* @sample_buff_size: internal sample buff size
* @rxfifo2_addr: start addr of RXF2
* @rxfifo2_size: size of RXF2
* @page_buff_addr: used by UMAC and performance debug (page miss analysis),
* when paging is not supported this should be 0
* @page_buff_size: size of %page_buff_addr
* @lmac_num: number of LMACs (1 or 2)
* @lmac_smem: per-LMAC smem data
*/
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM];
__le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 rxfifo2_addr;
__le32 rxfifo2_size;
__le32 page_buff_addr;
__le32 page_buff_size;
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
__le32 lmac_num;
struct iwl_shared_mem_lmac_cfg lmac_smem[2];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
@ -2178,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
/**
* enum iwl_extended_cfg_flag - commands driver may send before
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
* @IWL_INIT_PHY: driver is going to send PHY_DB commands
*/
enum iwl_extended_cfg_flags {
IWL_INIT_DEBUG_CFG,
IWL_INIT_NVM,
IWL_INIT_PHY,
};
/**
* struct iwl_extended_cfg_cmd - mark what commands ucode should wait for
* before finishing init flows
* @init_flags: values from iwl_extended_cfg_flags
*/
struct iwl_init_extended_cfg_cmd {
__le32 init_flags;
} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
#endif /* __fw_api_h__ */


@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -99,10 +99,120 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
iwl_trans_release_nic_access(mvm->trans, &flags);
}
static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
u32 *fifo_data;
u32 fifo_len;
int i;
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = size;
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
return;
/* Add a TLV for the RXF */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_D_SPACE + offset));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_WR_PTR + offset));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_RD_PTR + offset));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_FENCE_PTR + offset));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_SET_FENCE_MODE + offset));
/* Lock fence */
iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1);
/* Set fence pointer to the same place as the WR pointer */
iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1);
/* Set fence offset */
iwl_trans_write_prph(mvm->trans,
RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (i = 0; i < fifo_len; i++)
fifo_data[i] = iwl_trans_read_prph(mvm->trans,
RXF_FIFO_RD_FENCE_INC +
offset);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
u32 *fifo_data;
u32 fifo_len;
int i;
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = size;
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
return;
/* Add a TLV for the FIFO */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FIFO_ITEM_CNT + offset));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_WR_PTR + offset));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_RD_PTR + offset));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FENCE_PTR + offset));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_LOCK_FENCE + offset));
/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset,
TXF_WR_PTR + offset);
/* Dummy-read to advance the read pointer to the head */
iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (i = 0; i < fifo_len; i++)
fifo_data[i] = iwl_trans_read_prph(mvm->trans,
TXF_READ_MODIFY_DATA +
offset);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
u32 *fifo_data;
u32 fifo_len;
unsigned long flags;
@ -111,126 +221,47 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
return;
/* Pull RXF data from all RXFs */
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
/*
* Keep aside the additional offset that might be needed for
* next RXF
*/
u32 offset_diff = RXF_DIFF_FROM_PREV * i;
/* Pull RXF1 */
iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
/* Pull RXF2 */
iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
RXF_DIFF_FROM_PREV, 1);
/* Pull LMAC2 RXF1 */
if (mvm->smem_cfg.num_lmacs > 1)
iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
LMAC2_PRPH_OFFSET, 2);
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
continue;
/* Add a TLV for the RXF */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_D_SPACE +
offset_diff));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_WR_PTR +
offset_diff));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_RD_PTR +
offset_diff));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_FENCE_PTR +
offset_diff));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_SET_FENCE_MODE +
offset_diff));
/* Lock fence */
iwl_trans_write_prph(mvm->trans,
RXF_SET_FENCE_MODE + offset_diff, 0x1);
/* Set fence pointer to the same place like WR pointer */
iwl_trans_write_prph(mvm->trans,
RXF_LD_WR2FENCE + offset_diff, 0x1);
/* Set fence offset */
iwl_trans_write_prph(mvm->trans,
RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
0x0);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] = iwl_trans_read_prph(mvm->trans,
RXF_FIFO_RD_FENCE_INC +
offset_diff);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
/* Pull TXF data from all TXFs */
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
/* Pull TXF data from LMAC1 */
for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
0, i);
}
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
continue;
/* Add a TLV for the FIFO */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FIFO_ITEM_CNT));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_WR_PTR));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_RD_PTR));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FENCE_PTR));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_LOCK_FENCE));
/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
TXF_WR_PTR);
/* Dummy-read to advance the read pointer to the head */
iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] = iwl_trans_read_prph(mvm->trans,
TXF_READ_MODIFY_DATA);
*dump_data = iwl_fw_error_next_data(*dump_data);
/* Pull TXF data from LMAC2 */
if (mvm->smem_cfg.num_lmacs > 1) {
for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans,
TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
i);
iwl_mvm_dump_txf(mvm, dump_data,
cfg->lmac[1].txfifo_size[i],
LMAC2_PRPH_OFFSET,
i + cfg->num_txfifo_entries);
}
}
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++) {
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
fifo_len = mvm->smem_cfg.internal_txfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
mvm->smem_cfg.num_txfifo_entries);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
@ -553,31 +584,45 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg;
fifo_data_len = 0;
/* Count RXF size */
for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
if (!mem_cfg->rxfifo_size[i])
continue;
/* Count RXF2 size */
if (mem_cfg->rxfifo2_size) {
/* Add header info */
fifo_data_len += mem_cfg->rxfifo_size[i] +
fifo_data_len += mem_cfg->rxfifo2_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
if (!mem_cfg->txfifo_size[i])
/* Count RXF1 sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++) {
if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
fifo_data_len += mem_cfg->txfifo_size[i] +
fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
/* Count TXF sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;
for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
if (!mem_cfg->lmac[i].txfifo_size[j])
continue;
/* Add header info */
fifo_data_len +=
mem_cfg->lmac[i].txfifo_size[j] +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -271,6 +272,27 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
return 0;
}
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
__le32 *dump_data = mfu_dump_notif->data;
int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
int i;
if (mfu_dump_notif->index_num == 0)
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
le32_to_cpu(mfu_dump_notif->assert_id));
for (i = 0; i < n_words; i++)
IWL_DEBUG_INFO(mvm,
"MFUART assert dump, dword %u: 0x%08x\n",
le16_to_cpu(mfu_dump_notif->index_num) *
n_words + i,
le32_to_cpu(dump_data[i]));
}
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
const struct fw_img *image)
{
@ -617,11 +639,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (ret) {
if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
struct iwl_trans *trans = mvm->trans;
if (trans->cfg->gen2)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
return ret;
}
@ -807,6 +836,9 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait init_wait;
struct iwl_nvm_access_complete_cmd nvm_complete = {};
struct iwl_init_extended_cfg_cmd init_cfg = {
.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
};
static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
};
@ -828,10 +860,14 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto error;
}
/* TODO: remove when integrating context info */
ret = iwl_mvm_init_paging(mvm);
/* Send init config command to mark that we are sending NVM access
* commands
*/
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
INIT_EXTENDED_CFG_CMD), 0,
sizeof(init_cfg), &init_cfg);
if (ret) {
IWL_ERR(mvm, "Failed to init paging: %d\n",
IWL_ERR(mvm, "Failed to run init config command: %d\n",
ret);
goto error;
}
@ -876,24 +912,27 @@ static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i;
int i, lmac;
int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
mvm->shared_mem_cfg.num_txfifo_entries =
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
return;
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
mvm->smem_cfg.num_lmacs = lmac_num;
mvm->smem_cfg.num_txfifo_entries =
ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i++)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
for (lmac = 0; lmac < lmac_num; lmac++) {
struct iwl_shared_mem_lmac_cfg *lmac_cfg =
&mem_cfg->lmac_smem[lmac];
for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
le32_to_cpu(lmac_cfg->txfifo_size[i]);
mvm->smem_cfg.lmac[lmac].rxfifo1_size =
le32_to_cpu(lmac_cfg->rxfifo1_size);
}
}
static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
@ -902,25 +941,27 @@ static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
int i;
mvm->shared_mem_cfg.num_txfifo_entries =
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
mvm->smem_cfg.num_lmacs = 1;
mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
mvm->smem_cfg.lmac[0].txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
mvm->smem_cfg.lmac[0].rxfifo1_size =
le32_to_cpu(mem_cfg->rxfifo_size[0]);
mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
/* new API has more data, from rxfifo_addr field and on */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
for (i = 0;
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
mvm->smem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
}
@ -969,85 +1010,94 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
#define ACPI_WRDS_METHOD "WRDS"
#define ACPI_WRDS_WIFI (0x07)
#define ACPI_WRDS_TABLE_SIZE 10
struct iwl_mvm_sar_table {
bool enabled;
u8 values[ACPI_WRDS_TABLE_SIZE];
};
#ifdef CONFIG_ACPI
static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
struct iwl_mvm_sar_table *sar_table)
#define ACPI_WRDS_METHOD "WRDS"
#define ACPI_EWRD_METHOD "EWRD"
#define ACPI_WGDS_METHOD "WGDS"
#define ACPI_WIFI_DOMAIN (0x07)
#define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
IWL_MVM_SAR_TABLE_SIZE + 3)
#define ACPI_WGDS_WIFI_DATA_SIZE 18
#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
union acpi_object *table,
struct iwl_mvm_sar_profile *profile,
bool enabled)
{
union acpi_object *data_pkg;
u32 i;
int i;
/* We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
* (i.e. it is an integer set to 0).
*/
if (wrds->type != ACPI_TYPE_PACKAGE ||
wrds->package.count < 2 ||
wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
wrds->package.elements[0].integer.value != 0) {
IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
return -EINVAL;
}
profile->enabled = enabled;
/* loop through all the packages to find the one for WiFi */
for (i = 1; i < wrds->package.count; i++) {
union acpi_object *domain;
data_pkg = &wrds->package.elements[i];
/* Skip anything that is not a package with the right
* amount of elements (i.e. domain_type,
* enabled/disabled plus the sar table size.
*/
if (data_pkg->type != ACPI_TYPE_PACKAGE ||
data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
continue;
domain = &data_pkg->package.elements[0];
if (domain->type == ACPI_TYPE_INTEGER &&
domain->integer.value == ACPI_WRDS_WIFI)
break;
data_pkg = NULL;
}
if (!data_pkg)
return -ENOENT;
if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
return -EINVAL;
sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
union acpi_object *entry;
entry = &data_pkg->package.elements[i + 2];
if ((entry->type != ACPI_TYPE_INTEGER) ||
(entry->integer.value > U8_MAX))
for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
if ((table[i].type != ACPI_TYPE_INTEGER) ||
(table[i].integer.value > U8_MAX))
return -EINVAL;
sar_table->values[i] = entry->integer.value;
profile->table[i] = table[i].integer.value;
}
return 0;
}
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
struct iwl_mvm_sar_table *sar_table)
static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
union acpi_object *data,
int data_size)
{
int i;
union acpi_object *wifi_pkg;
/*
* We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
* (i.e. it is an integer set to 0).
*/
if (data->type != ACPI_TYPE_PACKAGE ||
data->package.count < 2 ||
data->package.elements[0].type != ACPI_TYPE_INTEGER ||
data->package.elements[0].integer.value != 0) {
IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
return ERR_PTR(-EINVAL);
}
/* loop through all the packages to find the one for WiFi */
for (i = 1; i < data->package.count; i++) {
union acpi_object *domain;
wifi_pkg = &data->package.elements[i];
/* Skip anything that is not a package with the right
* number of elements (i.e. domain_type,
* enabled/disabled plus the actual data size).
*/
if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
wifi_pkg->package.count != data_size)
continue;
domain = &wifi_pkg->package.elements[0];
if (domain->type == ACPI_TYPE_INTEGER &&
domain->integer.value == ACPI_WIFI_DOMAIN)
break;
wifi_pkg = NULL;
}
if (!wifi_pkg)
return ERR_PTR(-ENOENT);
return wifi_pkg;
}
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *table;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
bool enabled;
int ret;
root_handle = ACPI_HANDLE(mvm->dev);
@ -1072,62 +1122,301 @@ static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
return -ENOENT;
}
ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
kfree(wrds.pointer);
wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
ACPI_WRDS_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
enabled = !!(wifi_pkg->package.elements[1].integer.value);
/* position of the actual table */
table = &wifi_pkg->package.elements[2];
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
enabled);
out_free:
kfree(wrds.pointer);
return ret;
}
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
struct iwl_mvm_sar_table *sar_table)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
bool enabled;
int i, n_profiles, ret, pos;
root_handle = ACPI_HANDLE(mvm->dev);
if (!root_handle) {
IWL_DEBUG_RADIO(mvm,
"Could not retrieve root port ACPI handle\n");
return -ENOENT;
}
/* Get the method's handle */
status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
&handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
return -ENOENT;
}
/* Call EWRD with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
return -ENOENT;
}
wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
ACPI_EWRD_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
(wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
ret = -EINVAL;
goto out_free;
}
enabled = !!(wifi_pkg->package.elements[1].integer.value);
n_profiles = wifi_pkg->package.elements[2].integer.value;
/* the tables start at element 3 */
pos = 3;
for (i = 0; i < n_profiles; i++) {
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
ret = iwl_mvm_sar_set_profile(mvm,
&wifi_pkg->package.elements[pos],
&mvm->sar_profiles[i + 1],
enabled);
if (ret < 0)
break;
/* go to the next table */
pos += IWL_MVM_SAR_TABLE_SIZE;
}
out_free:
kfree(ewrd.pointer);
return ret;
}
static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm,
struct iwl_mvm_geo_table *geo_table)
{
union acpi_object *wifi_pkg;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
int i, ret;
root_handle = ACPI_HANDLE(mvm->dev);
if (!root_handle) {
IWL_DEBUG_RADIO(mvm,
"Could not retrieve root port ACPI handle\n");
return -ENOENT;
}
/* Get the method's handle */
status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
&handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
return -ENOENT;
}
/* Call WGDS with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
return -ENOENT;
}
wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
ACPI_WGDS_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) {
union acpi_object *entry;
entry = &wifi_pkg->package.elements[i + 1];
if ((entry->type != ACPI_TYPE_INTEGER) ||
(entry->integer.value > U8_MAX)) {
ret = -EINVAL;
goto out_free;
}
geo_table->values[i] = entry->integer.value;
}
ret = 0;
out_free:
kfree(wgds.pointer);
return ret;
}
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int ret, i, j, idx;
int i, j, idx;
int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
int len = sizeof(cmd);
BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
IWL_MVM_SAR_TABLE_SIZE);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
ret = iwl_mvm_sar_get_table(mvm, &sar_table);
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
struct iwl_mvm_sar_profile *prof;
/* don't allow SAR to be disabled (profile 0 means disable) */
if (profs[i] == 0)
return -EPERM;
/* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
return -EINVAL;
/* profiles go from 1 to 4, so decrement to access the array */
prof = &mvm->sar_profiles[profs[i] - 1];
/* if the profile is disabled, do nothing */
if (!prof->enabled) {
IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
profs[i]);
/* if one of the profiles is disabled, we fail all */
return -ENOENT;
}
IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
idx = (i * IWL_NUM_SUB_BANDS) + j;
cmd.v3.per_chain_restriction[i][j] =
cpu_to_le16(prof->table[idx]);
IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
j, prof->table[idx]);
}
}
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
struct iwl_mvm_geo_table geo_table;
struct iwl_geo_tx_power_profiles_cmd cmd = {
.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
};
int ret, i, j, idx;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"SAR BIOS table invalid or unavailable. (%d)\n",
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* we don't fail if the table is not available */
return 0;
}
if (!sar_table.enabled)
return 0;
IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
ACPI_WRDS_TABLE_SIZE);
for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
struct iwl_per_chain_offset *chain =
(struct iwl_per_chain_offset *)&cmd.table[i];
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
idx = (i * IWL_NUM_SUB_BANDS) + j;
cmd.v3.per_chain_restriction[i][j] =
cpu_to_le16(sar_table.values[idx]);
IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
j, sar_table.values[idx]);
for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
u8 *value;
idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE +
j * ACPI_WGDS_TABLE_SIZE;
value = &geo_table.values[idx];
chain[j].max_tx_power = cpu_to_le16(value[0]);
chain[j].chain_a = value[1];
chain[j].chain_b = value[2];
IWL_DEBUG_RADIO(mvm,
"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
i, j, value[1], value[2], value[0]);
}
}
return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}
ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
return -ENOENT;
}
static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
return -ENOENT;
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
return 0;
}
#endif /* CONFIG_ACPI */
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
int ret;
ret = iwl_mvm_sar_get_wrds_table(mvm);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* if not available, don't fail and don't bother with EWRD */
return 0;
}
ret = iwl_mvm_sar_get_ewrd_table(mvm);
/* if EWRD is not available, we can still use WRDS, so don't fail */
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* choose profile 1 (WRDS) as default for both chains */
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
/* if we don't have profile 0 from BIOS, just skip it */
if (ret == -ENOENT)
return 0;
return ret;
}
@ -1219,7 +1508,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* Init RSS configuration */
if (iwl_mvm_has_new_rx_api(mvm)) {
/* TODO - remove a000 disablement when we have RXQ config API */
if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@ -1229,10 +1519,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* init the fw <-> mac80211 STA mapping */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@ -1313,10 +1603,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
if (iwl_mvm_is_csum_supported(mvm) &&
mvm->cfg->features & NETIF_F_RXCSUM)
iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@ -1325,6 +1611,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
ret = iwl_mvm_sar_geo_init(mvm);
if (ret)
goto error;
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@ -1362,7 +1652,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
/* init the fw <-> mac80211 STA mapping */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
/* Add auxiliary station for scanning */


@ -472,8 +472,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
@ -1442,6 +1443,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *beacon_notify_hdr;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
struct agg_tx_status *agg_status;
u16 status;
lockdep_assert_held(&mvm->mutex);
@ -1449,7 +1451,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
beacon_notify_hdr = &beacon->beacon_notify_hdr;
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
status, beacon_notify_hdr->failure_frame,


@ -6,8 +6,8 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -33,7 +33,8 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -766,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
mvmsta->sta_id != mvm->d0i3_ap_sta_id)
goto out;
@ -1010,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->uploaded = false;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
@ -1053,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@ -1351,6 +1352,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
}
if (iwl_mvm_is_dqa_supported(mvm)) {
/*
* The only queue for this station is the mcast queue,
* which shouldn't be in the TFD mask anyway
*/
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
0, vif->type);
if (ret)
goto out_release;
}
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
}
@ -1516,6 +1528,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
iwl_mvm_dealloc_bcast_sta(mvm, vif);
goto out_release;
}
@ -1952,7 +1965,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_MVM_SMPS_REQ_PROT,
IEEE80211_SMPS_DYNAMIC);
}
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
/*
* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
@ -1966,8 +1979,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to remove AP station\n");
if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
@ -2104,6 +2117,10 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unbind;
ret = iwl_mvm_add_mcast_sta(mvm, vif);
if (ret)
goto out_rm_bcast;
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
@ -2131,6 +2148,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
out_quota_failed:
iwl_mvm_power_update_mac(mvm);
mvmvif->ap_ibss_active = false;
iwl_mvm_rm_mcast_sta(mvm, vif);
out_rm_bcast:
iwl_mvm_send_rm_bcast_sta(mvm, vif);
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
@ -2177,6 +2196,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
iwl_mvm_update_quotas(mvm, false, NULL);
iwl_mvm_rm_mcast_sta(mvm, vif);
iwl_mvm_send_rm_bcast_sta(mvm, vif);
iwl_mvm_binding_remove_vif(mvm, vif);
@ -2343,6 +2363,9 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;
if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE)
continue;
__set_bit(tid_data->txq_id, &txqs);
if (iwl_mvm_tid_queued(tid_data) == 0)
@ -2368,7 +2391,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
*/
break;
case STA_NOTIFY_AWAKE:
if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
break;
if (txqs)
@ -3939,7 +3962,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@ -4196,7 +4219,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_has_new_rx_api(mvm))
/* TODO - remove a000 disablement when we have RXQ config API */
if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
return;
notif->cookie = mvm->queue_sync_cookie;


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -407,6 +407,7 @@ struct iwl_mvm_vif {
struct iwl_mvm_time_event_data hs_time_event_data;
struct iwl_mvm_int_sta bcast_sta;
struct iwl_mvm_int_sta mcast_sta;
/*
* Assigned while mac80211 has the interface in a channel context,
@ -603,10 +604,15 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE,
};
#define MAX_NUM_LMAC 2
struct iwl_mvm_shared_mem_cfg {
int num_lmacs;
int num_txfifo_entries;
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo_size[RX_FIFO_MAX_NUM];
struct {
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo1_size;
} lmac[MAX_NUM_LMAC];
u32 rxfifo2_size;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@ -625,6 +631,7 @@ struct iwl_mvm_shared_mem_cfg {
* @reorder_timer: timer for frames in the reorder buffer. For AMSDU
* it is the time of last received sub-frame
* @removed: prevent timer re-arming
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
* @mvm: mvm pointer, needed for frame timer context
*/
@ -640,6 +647,7 @@ struct iwl_mvm_reorder_buffer {
unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer;
bool removed;
bool valid;
spinlock_t lock;
struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp;
@ -709,6 +717,21 @@ enum iwl_mvm_queue_status {
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
#define IWL_MVM_NUM_CIPHERS 10
#ifdef CONFIG_ACPI
#define IWL_MVM_SAR_TABLE_SIZE 10
#define IWL_MVM_SAR_PROFILE_NUM 4
#define IWL_MVM_GEO_TABLE_SIZE 18
struct iwl_mvm_sar_profile {
bool enabled;
u8 table[IWL_MVM_SAR_TABLE_SIZE];
};
struct iwl_mvm_geo_table {
u8 values[IWL_MVM_GEO_TABLE_SIZE];
};
#endif
struct iwl_mvm {
/* for logger access */
struct device *dev;
@ -975,7 +998,10 @@ struct iwl_mvm {
#endif
/* Tx queues */
u8 aux_queue;
u16 aux_queue;
u16 probe_queue;
u16 p2p_dev_queue;
u8 first_agg_queue;
u8 last_agg_queue;
@ -1018,7 +1044,7 @@ struct iwl_mvm {
} peer;
} tdls_cs;
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
struct iwl_mvm_shared_mem_cfg smem_cfg;
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
@ -1035,6 +1061,9 @@ struct iwl_mvm {
bool drop_bcn_ap_mode;
struct delayed_work cs_tx_unblock_dwork;
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
#endif
};
/* Extract MVM priv from op_mode and _hw */
@ -1222,13 +1251,25 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
{
/*
* TODO:
* The issue of how to determine CDB support is still not well defined.
* It may be that it will be for all next HW devices and it may be per
* FW compilation and it may also differ between different devices.
* For now take a ride on the new TX API and get back to it when
* it is well defined.
* The issue of how to determine CDB APIs and usage is still not fully
* defined.
* There is a compilation for CDB and non-CDB FW, but there may
* also be a runtime check.
* For now there is a TLV for checking compilation mode, but a
* runtime check will also have to be here - once defined.
*/
return iwl_mvm_has_new_tx_api(mvm);
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
}
static inline struct agg_tx_status*
iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
if (iwl_mvm_has_new_tx_api(mvm))
return &tx_resp->v6.status;
else
return &tx_resp->v3.status;
}
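
The helper above hides the layout difference between the two TX response versions behind one accessor. A minimal user-space sketch of the same pattern, with hypothetical v3/v6 layouts standing in for the firmware structures:

#include <stdio.h>

/* hypothetical stand-ins for the two response layouts: only the offset
 * of the status field differs, so one helper hides the split */
struct resp_v3 { unsigned short status; unsigned char pad[6]; };
struct resp_v6 { unsigned char pad[12]; unsigned short status; };

union tx_resp {
        struct resp_v3 v3;
        struct resp_v6 v6;
};

/* mirrors iwl_mvm_get_agg_status(): pick the field by API level */
static unsigned short *get_status(union tx_resp *r, int new_tx_api)
{
        return new_tx_api ? &r->v6.status : &r->v3.status;
}

int main(void)
{
        union tx_resp r = { .v6.status = 7 };

        printf("status=%u\n", (unsigned)*get_status(&r, 1));
        return 0;
}
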
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@ -1389,6 +1430,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@ -1668,6 +1711,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout);
/*
* Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
@ -1701,7 +1747,8 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
iwl_free_fw_paging(mvm);
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_free_fw_paging(mvm);
mvm->ucode_loaded = false;
iwl_trans_stop_device(mvm->trans);
}
@ -1797,4 +1844,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
u32 duration, u32 timeout);
bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
#else
static inline
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */
#endif /* __IWL_MVM_H__ */


@ -302,6 +302,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_SYNC),
RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
@ -426,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};
/* Please keep this array *SORTED* by hex value.
@ -444,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
HCMD_NAME(GEO_TX_POWER_LIMIT),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@ -452,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_PM_NOTIF),
@ -459,6 +464,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RX_QUEUES_NOTIFICATION),
};
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
HCMD_NAME(MFU_ASSERT_DUMP_NTF),
};
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
@ -602,6 +614,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
} else {
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
@ -1256,7 +1270,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
u8 tid;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
return false;
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
@ -1344,7 +1358,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
return;
rcu_read_lock();
@ -1414,7 +1428,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
mvm->d0i3_offloading = false;
}
@ -1427,7 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
return ret;
/* configure wowlan configuration only if needed */
if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
/* wake on beacons only if beacon storing isn't supported */
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_STORING))
@ -1504,7 +1518,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
spin_lock_bh(&mvm->d0i3_tx_lock);
if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
goto out;
IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
@ -1542,7 +1556,7 @@ out:
}
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
if (wake_queues)
ieee80211_wake_queues(mvm->hw);


@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY;
lockdep_assert_held(&mvm->mutex);
/* In CDB mode we cannot modify PHY context between bands so... */
if (iwl_mvm_has_new_tx_api(mvm) &&
ctxt->channel->band != chandef->chan->band) {
int ret;
/* ... remove it here ... */
ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE, 0);
if (ret)
return ret;
/* ... and proceed to add it again */
action = FW_CTXT_ACTION_ADD;
}
ctxt->channel = chandef->chan;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
FW_CTXT_ACTION_MODIFY, 0);
action, 0);
}
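
A sketch of the control flow above, with simplified stand-ins for the PHY context and the apply call: modify in place while the band is unchanged, otherwise remove and re-add so the firmware never sees a cross-band modify. The types and names here are illustrative, not the driver's.

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };
enum action { CTXT_ADD, CTXT_MODIFY, CTXT_REMOVE };

struct phy_ctxt { enum band band; };

/* hypothetical stand-in for iwl_mvm_phy_ctxt_apply() */
static int ctxt_apply(struct phy_ctxt *c, enum action a)
{
        printf("apply action %d on band %d\n", a, c->band);
        return 0;
}

static int ctxt_changed(struct phy_ctxt *c, enum band new_band)
{
        enum action action = CTXT_MODIFY;
        int ret;

        if (c->band != new_band) {
                /* cross-band modify is not allowed: remove, then add */
                ret = ctxt_apply(c, CTXT_REMOVE);
                if (ret)
                        return ret;
                action = CTXT_ADD;
        }
        c->band = new_band;
        return ctxt_apply(c, action);
}

int main(void)
{
        struct phy_ctxt c = { BAND_2GHZ };

        return ctxt_changed(&c, BAND_5GHZ);
}
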
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
unsigned int hdrlen, fraglen;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
unsigned int fraglen;
/*
* The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
* but those are all multiples of 4 long) all goes away, but we
* want the *end* of it, which is going to be the start of the IP
* header, to be aligned when it gets pulled in.
* The beginning of the skb->data is aligned on at least a 4-byte
* boundary after allocation. Everything here is aligned at least
* on a 2-byte boundary so we can just take hdrlen & 3 and pad by
* the result.
*/
skb_reserve(skb, hdrlen & 3);
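
The hdrlen & 3 computation can be checked in isolation; a small sketch, assuming only standard C and a few representative 802.11 header lengths:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* 802.11 header lengths are always even; reserving (hdrlen & 3)
         * bytes up front makes reserve + hdrlen a multiple of 4, so the
         * IP header that follows lands 4-byte aligned */
        unsigned int lens[] = { 24, 26, 30, 32 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                unsigned int pad = lens[i] & 3;

                printf("hdrlen %u -> pad %u\n", lens[i], pad);
                assert((pad + lens[i]) % 4 == 0);
        }
        return 0;
}
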
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
* If the latter changes (there are efforts in the standards group
* to do so) we should revisit this and ieee80211_data_to_8023().
*/
hdrlen = (len <= skb_tailroom(skb)) ? len :
sizeof(*hdr) + crypt_len + 8;
hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen;
@ -339,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@ -448,9 +460,16 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else {
rx_status->rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
if (WARN(rate < 0 || rate > 0xFF,
"Invalid rate flags 0x%x, band %d,\n",
rate_n_flags, rx_status->band)) {
kfree_skb(skb);
return;
}
rx_status->rate_idx = rate;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@ -637,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm,
};
int expected_size;
int i;
u8 *energy;
__le32 *bytes, *air_time;
if (iwl_mvm_is_cdb_supported(mvm))
expected_size = sizeof(*stats);
@ -645,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
if (iwl_rx_packet_payload_len(pkt) != expected_size)
goto invalid;
if (iwl_rx_packet_payload_len(pkt) != expected_size) {
IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
iwl_rx_packet_payload_len(pkt));
return;
}
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
@ -662,38 +687,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
le64_to_cpu(stats->general.common.on_time_scan);
data.general = &stats->general;
if (iwl_mvm_has_new_rx_api(mvm)) {
int i;
u8 *energy;
__le32 *bytes, *air_time;
if (!iwl_mvm_is_cdb_supported(mvm)) {
struct iwl_notif_statistics_v11 *v11 =
(void *)&pkt->data;
energy = (void *)&v11->load_stats.avg_energy;
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
air_time = (void *)&stats->load_stats.air_time;
}
rcu_read_lock();
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
struct iwl_mvm_sta *sta;
if (!energy[i])
continue;
sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
if (!sta)
continue;
sta->avg_energy = energy[i];
}
rcu_read_unlock();
}
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
@ -701,10 +694,36 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
return;
invalid:
IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
iwl_rx_packet_payload_len(pkt));
if (!iwl_mvm_has_new_rx_api(mvm))
return;
if (!iwl_mvm_is_cdb_supported(mvm)) {
struct iwl_notif_statistics_v11 *v11 =
(void *)&pkt->data;
energy = (void *)&v11->load_stats.avg_energy;
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
air_time = (void *)&stats->load_stats.air_time;
}
rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
struct iwl_mvm_sta *sta;
if (!energy[i])
continue;
sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
if (!sta)
continue;
sta->avg_energy = energy[i];
}
rcu_read_unlock();
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -462,6 +462,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
int i;
u16 sn = 0, index = 0;
bool expired = false;
bool cont = false;
spin_lock(&buf->lock);
@ -473,12 +474,21 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
if (skb_queue_empty(&buf->entries[index]))
if (skb_queue_empty(&buf->entries[index])) {
/*
* If there is a hole and the next frame didn't expire
* we want to break and not advance SN
*/
cont = false;
continue;
if (!time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
}
if (!cont && !time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
break;
expired = true;
/* continue until the next hole after these expired frames */
cont = true;
sn = ieee80211_sn_add(buf->head_sn, i + 1);
}
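
The cont flag above releases whole runs of expired frames; after a hole, the scan only continues if the next queued frame has itself expired. A user-space sketch of that scan, with a plain array standing in for the reorder buffer and hypothetical sample data:

#include <stdio.h>

#define BUF_SIZE 8

int main(void)
{
        /* hypothetical sample data: present[] marks queued frames
         * (0 = hole), expired_f[] marks frames whose reorder timeout
         * has passed */
        int present[BUF_SIZE]   = { 1, 1, 0, 1, 0, 0, 1, 1 };
        int expired_f[BUF_SIZE] = { 1, 1, 0, 1, 0, 0, 0, 0 };
        int cont = 0, expired = 0, sn = 0, i;

        for (i = 0; i < BUF_SIZE; i++) {
                if (!present[i]) {
                        /* hole: the next queued frame must itself have
                         * expired for the scan to keep going */
                        cont = 0;
                        continue;
                }
                if (!cont && !expired_f[i])
                        break;  /* next frame not expired: stop here */
                expired = 1;
                cont = 1;       /* release up to the next hole */
                sn = i + 1;
        }
        printf("expired=%d, release up to sn=%d\n", expired, sn);
        return 0;
}
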
@ -626,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false;
baid_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN(!baid_data,
"Received baid %d, but no data exists for this BAID\n", baid))
if (!baid_data) {
WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
"Received baid %d, but no data exists for this BAID\n",
baid);
return false;
}
if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
"baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
@ -643,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
spin_lock_bh(&buffer->lock);
if (!buffer->valid) {
if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
spin_unlock_bh(&buffer->lock);
return false;
}
buffer->valid = true;
}
if (ieee80211_is_back_req(hdr->frame_control)) {
iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
goto drop;
@ -727,7 +749,8 @@ drop:
return true;
}
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
u32 reorder_data, u8 baid)
{
unsigned long now = jiffies;
unsigned long timeout;
@ -736,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
rcu_read_lock();
data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON(!data))
if (!data) {
WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
goto out;
}
if (!data->timeout)
goto out;
@ -831,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@ -893,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb);
rcu_read_unlock();
return;
goto out;
}
/*
* Our hardware de-aggregates AMSDUs but copies the mac header
* as is to the de-aggregated MPDUs. We need to turn off the
* AMSDU bit in the QoS control ourselves.
* In addition, HW reverses addr3 and addr4 - reverse them back.
*/
if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
!WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
int i;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 mac_addr[ETH_ALEN];
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
if (!(desc->amsdu_info &
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
ether_addr_copy(hdr->addr3, mac_addr);
if (ieee80211_has_a4(hdr->frame_control)) {
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] =
hdr->addr4[ETH_ALEN - i - 1];
ether_addr_copy(hdr->addr4, mac_addr);
}
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
u32 reorder_data = le32_to_cpu(desc->reorder_data);
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
iwl_mvm_agg_rx_received(mvm, baid);
}
/* Set up the HT phy flags */
@ -953,9 +991,17 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else {
rx_status->rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
if (WARN(rate < 0 || rate > 0xFF,
"Invalid rate flags 0x%x, band %d,\n",
rate_n_flags, rx_status->band)) {
kfree_skb(skb);
goto out;
}
rx_status->rate_idx = rate;
}
/* management stuff on default queue */
@ -974,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
out:
rcu_read_unlock();
}


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -966,11 +966,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
channels[j] = band->channels[i].hw_value;
}
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config *cfg = config;
struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@ -989,11 +989,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
iwl_mvm_fill_channels(mvm, cfg->channel_array);
}
static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config_cdb *cfg = config;
struct iwl_scan_config *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@ -1001,10 +1001,14 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time[0] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->out_of_channel_time[1] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
cfg->suspend_time[1] =
cpu_to_le32(scan_timing[type].suspend_time);
cfg->out_of_channel_time[1] =
cpu_to_le32(scan_timing[type].max_out_time);
}
iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
@ -1033,16 +1037,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
if (type == mvm->scan_type) {
IWL_DEBUG_SCAN(mvm,
"Ignoring UMAC scan config of the same type\n");
if (type == mvm->scan_type)
return 0;
}
if (iwl_mvm_is_cdb_supported(mvm))
cmd_size = sizeof(struct iwl_scan_config_cdb);
else
if (iwl_mvm_has_new_tx_api(mvm))
cmd_size = sizeof(struct iwl_scan_config);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL);
@ -1068,13 +1069,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
if (iwl_mvm_is_cdb_supported(mvm)) {
if (iwl_mvm_has_new_tx_api(mvm)) {
flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
}
cmd.data[0] = cfg;
@ -1119,16 +1120,20 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
}
cmd->fragmented_dwell = timing->dwell_fragmented;
if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->v6.max_out_time[1] =
cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[1] =
cpu_to_le32(timing->suspend_time);
}
} else {
cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
cmd->no_cdb.scan_priority =
cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time);
cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time);
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
@ -1207,8 +1212,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
(void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
(void *)&cmd->v6.data : (void *)&cmd->v1.data;
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@ -1245,12 +1250,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->cdb.channel_flags = channel_flags;
cmd->cdb.n_channels = params->n_channels;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
cmd->no_cdb.channel_flags = channel_flags;
cmd->no_cdb.n_channels = params->n_channels;
cmd->v1.channel_flags = channel_flags;
cmd->v1.n_channels = params->n_channels;
}
iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
@ -1692,10 +1697,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE;
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
if (iwl_mvm_is_cdb_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
if (iwl_mvm_has_new_tx_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +


@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
case SF_FULL_ON:
if (sta_id == IWL_MVM_STATION_COUNT) {
if (sta_id == IWL_MVM_INVALID_STA) {
IWL_ERR(mvm,
"No station: Cannot switch SF to FULL_ON\n");
return -EINVAL;
@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
bool remove_vif)
{
enum iwl_sf_state new_state;
u8 sta_id = IWL_MVM_STATION_COUNT;
u8 sta_id = IWL_MVM_INVALID_STA;
struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_active_iface_iterator_data data = {
.ignore_vif = changed_vif,
.sta_vif_state = SF_UNINIT,
.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
.sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
};
/*


@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -98,7 +98,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
reserved_ids = BIT(0);
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
if (BIT(sta_id) & reserved_ids)
continue;
@ -106,7 +106,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
lockdep_is_held(&mvm->mutex)))
return sta_id;
}
return IWL_MVM_STATION_COUNT;
return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
@ -127,11 +127,17 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 agg_size = 0, mpdu_dens = 0;
if (!update || (flags & STA_MODIFY_QUEUES)) {
add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
if (flags & STA_MODIFY_QUEUES)
add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
if (!iwl_mvm_has_new_tx_api(mvm)) {
add_sta_cmd.tfd_queue_msk =
cpu_to_le32(mvm_sta->tfd_queue_msk);
if (flags & STA_MODIFY_QUEUES)
add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
} else {
WARN_ON(flags & STA_MODIFY_QUEUES);
}
}
switch (sta->bandwidth) {
@ -209,13 +215,15 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
add_sta_cmd.uapsd_acs |= BIT(AC_BK);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
add_sta_cmd.uapsd_acs |= BIT(AC_BE);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
add_sta_cmd.uapsd_acs |= BIT(AC_VI);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
add_sta_cmd.uapsd_acs |= BIT(AC_VO);
add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
}
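
A sketch of the uapsd_acs encoding built above, assuming (as the shift suggests) that the lower nibble carries trigger-enabled ACs and the upper nibble mirrors them as delivery-enabled; the bit positions and settings are illustrative:

#include <stdio.h>

enum { AC_BK, AC_BE, AC_VI, AC_VO };    /* bit positions as in the driver */

int main(void)
{
        unsigned char uapsd_acs = 0;
        int bk = 1, vo = 1;     /* hypothetical U-APSD queue settings */
        int max_sp = 4;

        if (bk)
                uapsd_acs |= 1 << AC_BK;
        if (vo)
                uapsd_acs |= 1 << AC_VO;
        /* mirror the trigger-enabled nibble into the upper,
         * delivery-enabled nibble, as the driver's shift does */
        uapsd_acs |= uapsd_acs << 4;

        /* same sp_length rule as above: max_sp * 2, or 128 when unset */
        printf("uapsd_acs=0x%02x sp_length=%d\n",
               uapsd_acs, max_sp ? max_sp * 2 : 128);
        return 0;
}
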
status = ADD_STA_SUCCESS;
@ -337,6 +345,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
u8 sta_id;
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
@ -387,6 +398,9 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@ -426,6 +440,9 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@ -468,6 +485,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
lockdep_assert_held(&mvm->mutex);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
@ -512,6 +532,8 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
int i;
lockdep_assert_held(&mvm->queue_info_lock);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
@ -596,6 +618,9 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
unsigned long mq;
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
/*
* If the AC is lower than current one - FIFO needs to be redirected to
* the lowest one of the streams in the queue. Check if this is needed
@ -677,6 +702,41 @@ out:
return ret;
}
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac,
int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating queue for sta %d on tid %d\n",
mvmsta->sta_id, tid);
queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
wdg_timeout);
if (queue < 0)
return queue;
IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
spin_unlock_bh(&mvmsta->lock);
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
return 0;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@ -702,6 +762,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
spin_lock_bh(&mvmsta->lock);
tfd_queue_mask = mvmsta->tfd_queue_msk;
spin_unlock_bh(&mvmsta->lock);
@ -880,6 +943,9 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;
spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
@ -917,6 +983,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
int ssn;
int ret = true;
/* queue sharing is disabled on new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
@ -1199,18 +1269,30 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
ac = tid_to_mac80211_ac[i];
mac_queue = mvm_sta->vif->hw_queue[ac];
cfg.tid = i;
cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
if (iwl_mvm_has_new_tx_api(mvm)) {
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d\n",
mvm_sta->sta_id, i);
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
mvm_sta->sta_id,
i, wdg_timeout);
tid_data->txq_id = txq_id;
} else {
u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);
cfg.tid = i;
cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
txq_id ==
IWL_MVM_DQA_BSS_CLIENT_QUEUE);
iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
IEEE80211_SEQ_TO_SN(tid_data->seq_number),
&cfg, wdg_timeout);
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);
iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
wdg_timeout);
}
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
@ -1235,7 +1317,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
else
sta_id = mvm_sta->sta_id;
if (sta_id == IWL_MVM_STATION_COUNT)
if (sta_id == IWL_MVM_INVALID_STA)
return -ENOSPC;
spin_lock_init(&mvm_sta->lock);
@ -1317,10 +1399,10 @@ update_fw:
if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) {
WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
mvmvif->ap_sta_id = sta_id;
} else {
WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
}
}
@ -1571,11 +1653,11 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
}
}
@ -1584,7 +1666,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* before the STA is removed.
*/
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
@ -1641,7 +1723,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
}
@ -1652,12 +1734,11 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
return 0;
}
static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta)
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
sta->sta_id = IWL_MVM_STATION_COUNT;
sta->sta_id = IWL_MVM_INVALID_STA;
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
@ -1676,7 +1757,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
if (!iwl_mvm_has_new_tx_api(mvm))
cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(0xffff);
if (addr)
@ -1701,27 +1783,19 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
return ret;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
int ret;
lockdep_assert_held(&mvm->mutex);
/* Map Aux queue to fifo - needs to happen before adding Aux station */
if (!iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;
if (iwl_mvm_is_dqa_supported(mvm)) {
if (iwl_mvm_has_new_tx_api(mvm)) {
int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
mvm->aux_sta.sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
mvm->aux_queue = queue;
} else if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvm->aux_sta.sta_id,
@ -1732,14 +1806,43 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
wdg_timeout);
} else {
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
}
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
int ret;
lockdep_assert_held(&mvm->mutex);
/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;
/* Map Aux queue to fifo - needs to happen before adding Aux station */
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_enable_aux_queue(mvm);
ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
MAC_INDEX_AUX, 0);
if (ret)
if (ret) {
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
return ret;
return ret;
}
/*
* For a000 firmware and onward we cannot add a queue to a station
* unknown to the firmware, so enable the queue here - after the
* station was added
*/
if (iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_enable_aux_queue(mvm);
return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@ -1790,39 +1893,39 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
const u8 *baddr = _baddr;
int queue = 0;
int ret;
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_VO,
.sta_id = mvmvif->bcast_sta.sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_VO,
.sta_id = mvmvif->bcast_sta.sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int queue;
if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
queue = mvm->probe_queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
queue = mvm->p2p_dev_queue;
else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
return -EINVAL;
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
wdg_timeout);
bsta->tfd_queue_msk |= BIT(queue);
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
&cfg, wdg_timeout);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
baddr = vif->bss_conf.bssid;
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
@ -1831,27 +1934,20 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
/*
* In AP vif type, we also need to enable the cab_queue. However, we
* have to enable it after the ADD_STA command is sent, otherwise the
* FW will throw an assert once we send the ADD_STA command (it'll
* detect a mismatch in the tfd_queue_msk, as we can't add the
* enabled-cab_queue to the mask)
* For a000 firmware and onward we cannot add a queue to a station
* unknown to the firmware, so enable the queue here - after the
* station was added
*/
if (iwl_mvm_is_dqa_supported(mvm) &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvmvif->bcast_sta.sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
if (iwl_mvm_has_new_tx_api(mvm)) {
int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
bsta->sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
if (vif->type == NL80211_IFTYPE_AP)
mvm->probe_queue = queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_dev_queue = queue;
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
0, &cfg, wdg_timeout);
bsta->tfd_queue_msk |= BIT(queue);
}
return 0;
@ -1869,24 +1965,18 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
if (mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
iwl_mvm_disable_txq(mvm, mvm->probe_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
mvmvif->bcast_sta.tfd_queue_msk &=
~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
}
if (mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_P2P_DEVICE_QUEUE,
if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
mvmvif->bcast_sta.tfd_queue_msk &=
~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
}
}
@ -1982,6 +2072,88 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
/*
* Allocate a new station entry for the multicast station of the given vif,
* and send it to the FW.
* Note that each AP/GO mac should have its own multicast station.
*
* @mvm: the mvm component
* @vif: the interface to which the multicast station is added
*/
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
const u8 *maddr = _maddr;
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = msta->sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int ret;
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_is_dqa_supported(mvm))
return 0;
if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
return -ENOTSUPP;
ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
mvmvif->id, mvmvif->color);
if (ret) {
iwl_mvm_dealloc_int_sta(mvm, msta);
return ret;
}
/*
* Enable cab queue after the ADD_STA command is sent.
* This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
* command with an unknown station id.
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
msta->sta_id,
IWL_MAX_TID_COUNT,
timeout);
vif->cab_queue = queue;
} else {
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, timeout);
}
return 0;
}
/*
* Send the FW a request to remove the station from its internal data
* structures, and in addition remove it from the local data structure.
*/
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_is_dqa_supported(mvm))
return 0;
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
@ -2059,6 +2231,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->sta_id = sta_id;
reorder_buf->valid = false;
for (j = 0; j < reorder_buf->buf_size; j++)
__skb_queue_head_init(&reorder_buf->entries[j]);
}
@ -2226,7 +2399,9 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
if (!iwl_mvm_has_new_tx_api(mvm))
cmd.modify_mask = STA_MODIFY_QUEUES;
cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
@ -2426,6 +2601,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* changed from current (become smaller)
*/
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
/*
* On the new TX API, rate scaling and the BA manager are offloaded.
* For now though, just don't support being reconfigured.
*/
if (iwl_mvm_has_new_tx_api(mvm))
return -ENOTSUPP;
/*
* If reconfiguring an existing queue, it first must be
* drained
@ -2675,7 +2857,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
* station ID, then use AP's station ID.
*/
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
@ -2697,68 +2879,97 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
struct ieee80211_key_conf *keyconf, bool mcast,
struct ieee80211_key_conf *key, bool mcast,
u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
u8 key_offset)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
union {
struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
struct iwl_mvm_add_sta_key_cmd cmd;
} u = {};
__le16 key_flags;
int ret;
u32 status;
u16 keyidx;
int i;
u8 sta_id = mvm_sta->sta_id;
u64 pn = 0;
int i, size;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
switch (keyconf->cipher) {
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
cmd.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
if (new_api) {
memcpy((void *)&u.cmd.tx_mic_key,
&key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
IWL_MIC_KEY_SIZE);
memcpy((void *)&u.cmd.rx_mic_key,
&key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
IWL_MIC_KEY_SIZE);
pn = atomic64_read(&key->tx_pn);
} else {
u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
u.cmd_v1.tkip_rx_ttak[i] =
cpu_to_le16(tkip_p1k[i]);
}
memcpy(u.cmd.common.key, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key, key->key, key->keylen);
if (new_api)
pn = atomic64_read(&key->tx_pn);
break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_WEP40:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key + 3, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_GCMP_256:
key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_GCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key, key->key, key->keylen);
if (new_api)
pn = atomic64_read(&key->tx_pn);
break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key, key->key, key->keylen);
}
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
cmd.key_offset = key_offset;
cmd.key_flags = key_flags;
cmd.sta_id = sta_id;
u.cmd.common.key_offset = key_offset;
u.cmd.common.key_flags = key_flags;
u.cmd.common.sta_id = mvm_sta->sta_id;
if (new_api) {
u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
size = sizeof(u.cmd);
} else {
size = sizeof(u.cmd_v1);
}
status = ADD_STA_SUCCESS;
if (cmd_flags & CMD_ASYNC)
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
sizeof(cmd), &cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
&u.cmd);
else
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
&cmd, &status);
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
&u.cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
@ -2858,7 +3069,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return sta->addr;
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
@ -2911,9 +3122,14 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
struct ieee80211_key_conf *keyconf,
bool mcast)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
union {
struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
struct iwl_mvm_add_sta_key_cmd cmd;
} u = {};
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
__le16 key_flags;
int ret;
int ret, size;
u32 status;
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
@ -2924,13 +3140,19 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
cmd.key_flags = key_flags;
cmd.key_offset = keyconf->hw_key_idx;
cmd.sta_id = sta_id;
/*
* The fields assigned here are in the same location at the start
* of the command, so we can do this union trick.
*/
u.cmd.common.key_flags = key_flags;
u.cmd.common.key_offset = keyconf->hw_key_idx;
u.cmd.common.sta_id = sta_id;
size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
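
The union trick depends only on both command versions sharing the same leading fields, so the smaller v1 size can be sent for old firmware. A minimal sketch with hypothetical layouts:

#include <stdio.h>
#include <string.h>

/* hypothetical layouts: both versions begin with the same fields, so
 * writes through u.cmd.common land correctly for either one */
struct key_cmd_common {
        unsigned short key_flags;
        unsigned char key_offset;
        unsigned char sta_id;
};

struct key_cmd_v1 {
        struct key_cmd_common common;
        unsigned char key[16];
};

struct key_cmd {
        struct key_cmd_common common;
        unsigned char key[32];
        unsigned long long pn;
};

int main(void)
{
        union {
                struct key_cmd_v1 cmd_v1;
                struct key_cmd cmd;
        } u;
        int new_api = 0;        /* would come from a firmware TLV check */
        size_t size;

        memset(&u, 0, sizeof(u));
        u.cmd.common.sta_id = 3;        /* valid for both layouts */
        size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
        printf("sending %zu bytes, sta_id=%u\n",
               size, (unsigned)u.cmd.common.sta_id);
        return 0;
}
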
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
&cmd, &status);
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
&status);
switch (status) {
case ADD_STA_SUCCESS:
@ -3044,7 +3266,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_sta *mvm_sta;
u8 sta_id = IWL_MVM_STATION_COUNT;
u8 sta_id = IWL_MVM_INVALID_STA;
int ret, i;
lockdep_assert_held(&mvm->mutex);
@ -3301,7 +3523,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* Block/unblock all the stations of the given mvmvif */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))


@ -532,10 +532,13 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
u32 qmask, enum nl80211_iftype iftype);
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);


@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* populate TDLS peer data */
cnt = 0;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls)
@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
if (state == IWL_MVM_TDLS_SW_IDLE)
mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
/* get the existing peer if it's there */
if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex));
@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
goto out;
sta = rcu_dereference_protected(
@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
sta->addr, chandef->chan->center_freq, chandef->width);
/* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
/* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
goto out;
}
@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
wait_for_phy = true;
- mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
dev_kfree_skb(mvm->tdls_cs.peer.skb);
mvm->tdls_cs.peer.skb = NULL;
@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
params->status != 0 &&
mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
- mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *cur_sta;
/* make sure it's the same peer */
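
Read together, the tdls.c hunks above make two mechanical substitutions: the per-station loops bound themselves to the fw_id_to_mac_id array via ARRAY_SIZE() instead of the IWL_MVM_STATION_COUNT constant, and "no station" is now the dedicated sentinel IWL_MVM_INVALID_STA rather than the table size. A condensed sketch of the resulting pattern (illustrative only, not part of the patch; the sentinel value is assumed here, the real definition lives in the driver headers):

	#define IWL_MVM_INVALID_STA 0xFF	/* value assumed for illustration */

	static void sketch_walk_tdls_peers(struct iwl_mvm *mvm)
	{
		struct ieee80211_sta *sta;
		int i;

		/* bound follows the array, not a station-count constant */
		for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
			sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
							lockdep_is_held(&mvm->mutex));
			if (!sta || IS_ERR(sta) || !sta->tdls)
				continue;
			/* ... per-peer work ... */
		}

		/* "no peer" is an explicit sentinel, not the table size */
		if (mvm->tdls_cs.cur_sta_id == IWL_MVM_INVALID_STA)
			return;
	}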

--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c

@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
if (IWL_MVM_TOF_IS_RESPONDER) {
tof_data->responder_cfg.sub_grp_cmd_id =
cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
- tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+ tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
}
#endif

--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c

@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
struct iwl_mvm_sta *mvmsta;
int i, err;
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta)
continue;

--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c

@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = TX_CMD;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+ u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);
+ /* padding is inserted later in transport */
+ /* FIXME - check for AMSDU may need to be removed */
+ if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
+ !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+ if (!info->control.hw_key)
+ cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+ /* For data packets rate info comes from the fw */
+ if (ieee80211_is_data(hdr->frame_control) && sta)
+ goto out;
+ cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
+ cmd->rate_n_flags =
+ cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+ goto out;
+ }
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key)
@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
+ out:
return dev_cmd;
}
@ -514,21 +551,21 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
*/
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
ieee80211_is_deauth(fc))
- return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ return mvm->probe_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
"fc=0x%02x", le16_to_cpu(fc));
- return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ return mvm->probe_queue;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
- return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ return mvm->p2p_dev_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ON_ONCE(1);
- return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ return mvm->p2p_dev_queue;
default:
WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
return -1;
@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
@ -598,7 +634,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
- if (ap_sta_id != IWL_MVM_STATION_COUNT)
+ if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_STATION &&
@ -616,11 +652,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
@ -713,7 +744,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* fifo to be able to send bursts.
*/
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
- mvm->shared_mem_cfg.txfifo_size[txf] - 256);
+ mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@ -862,6 +893,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
unsigned long now = jiffies;
int tid;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
@ -881,7 +915,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
struct iwl_device_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
@ -896,7 +929,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;
- if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
@ -904,8 +937,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!dev_cmd)
goto drop;
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/*
* we handle that entirely ourselves -- for uAPSD the firmware
* will always send a notification, and for PS-Poll responses
@ -926,18 +957,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
goto drop_unlock_sta;
- seq_number = mvmsta->tid_data[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
if (WARN_ON_ONCE(is_ampdu &&
mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;
+ seq_number = mvmsta->tid_data[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ /* update the tx_cmd hdr as it was already copied */
+ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
+ }
}
if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id;
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@ -945,9 +985,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
@ -1036,7 +1073,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;
- if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
memcpy(&info, skb->cb, sizeof(info));
@ -1245,6 +1282,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
}
}
+ /**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+ struct iwl_mvm_tx_resp *tx_resp)
+ {
+ return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count) & 0xfff;
+ }
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@ -1254,8 +1311,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
- u32 status = le16_to_cpu(tx_resp->status.status);
- u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
+ struct agg_tx_status *agg_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
+ u32 status = le16_to_cpu(agg_status->status);
+ u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
struct iwl_mvm_sta *mvmsta;
struct sk_buff_head skbs;
u8 skb_freed = 0;
@ -1264,6 +1323,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
__skb_queue_head_init(&skbs);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
/* we can free until ssn % q.n_bd not inclusive */
@ -1388,7 +1450,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (tid != IWL_TID_NON_QOS) {
+ if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
bool send_eosp_ndp = false;
@ -1520,7 +1582,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
- struct agg_tx_status *frame_status = &tx_resp->status;
+ struct agg_tx_status *frame_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
for (i = 0; i < tx_resp->frame_count; i++) {
@ -1722,6 +1785,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;
+ if (!le16_to_cpu(ba_res->tfd_cnt))
+ goto out;
/*
* TODO:
* When supporting multi TID aggregations - we need to move
@ -1730,12 +1796,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* This will go together with SN and AddBA offload and cannot
* be handled properly for now.
*/
- WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
- iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
- (int)ba_res->tfd[0].q_num,
+ WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
+ tid = ba_res->ra_tid[0].tid;
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid,
+ (int)(le16_to_cpu(ba_res->tfd[0].q_num)),
le16_to_cpu(ba_res->tfd[0].tfd_index),
&ba_info, le32_to_cpu(ba_res->tx_rate));
+ out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
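
The iwl_mvm_get_scd_ssn() comment in the hunk above is worth unpacking: because the firmware appends the scheduler's SSN directly after frame_count agg status entries, the SSN sits at a variable offset that the driver has to compute. A self-contained toy version of that read (stand-in types invented for illustration; the real structures live in the firmware API headers, and the driver additionally byte-swaps with le32_to_cpup()):

	#include <stdint.h>

	/* each status entry is 4 bytes: two 16-bit fields */
	struct toy_agg_status {
		uint16_t status;
		uint16_t sequence;
	};

	struct toy_tx_resp {
		uint8_t frame_count;		/* MPDUs in this batch */
		/* ... fixed-size fields elided ... */
		struct toy_agg_status status[];	/* frame_count entries, then the SSN */
	};

	/* step frame_count 32-bit slots past the status array base and
	 * keep the low 12 bits, the width of an 802.11 sequence number
	 */
	static uint32_t toy_get_scd_ssn(const struct toy_tx_resp *r)
	{
		const uint32_t *p = (const uint32_t *)r->status + r->frame_count;

		return *p & 0xfff;
	}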

--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c

@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -597,6 +598,9 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -ENOSPC;
/*
* If no free queue found - settle for an inactive one to reconfigure
* Make sure that the inactive queue either already belongs to this STA,
@ -627,6 +631,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
};
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
@ -644,20 +651,19 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}
- void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout)
+ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure this TID isn't already enabled */
- if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
- queue, cfg->tid);
- return;
+ queue, tid);
+ return false;
}
/* Update mappings and refcounts */
@ -666,17 +672,17 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount++;
- mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
- mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
if (enable_queue) {
- if (cfg->tid != IWL_MAX_TID_COUNT)
+ if (tid != IWL_MAX_TID_COUNT)
mvm->queue_info[queue].mac80211_ac =
- tid_to_mac80211_ac[cfg->tid];
+ tid_to_mac80211_ac[tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
- mvm->queue_info[queue].txq_tid = cfg->tid;
+ mvm->queue_info[queue].txq_tid = tid;
}
IWL_DEBUG_TX_QUEUES(mvm,
@ -686,8 +692,49 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
spin_unlock_bh(&mvm->queue_info_lock);
+ return enable_queue;
+ }
+ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout)
+ {
+ struct iwl_tx_queue_cfg_cmd cmd = {
+ .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ .sta_id = sta_id,
+ .tid = tid,
+ };
+ int queue;
+ if (cmd.tid == IWL_MAX_TID_COUNT)
+ cmd.tid = IWL_MGMT_TID;
+ queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
+ SCD_QUEUE_CFG, timeout);
+ if (queue < 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+ sta_id, tid, queue);
+ return queue;
+ }
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+ queue, sta_id, tid);
+ iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, sta_id, tid);
+ return queue;
+ }
+ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+ {
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
/* Send the enabling command if we need to */
- if (enable_queue) {
+ if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid)) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
@ -701,7 +748,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
+ sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd),
"Failed to configure queue %d on FIFO %d\n", queue,
cfg->fifo);
@ -716,7 +764,6 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = true;
- int ret;
spin_lock_bh(&mvm->queue_info_lock);
@ -787,14 +834,23 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
spin_unlock_bh(&mvm->queue_info_lock);
- iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
- queue, ret);
- return ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ iwl_trans_txq_free(mvm->trans, queue);
+ } else {
+ int ret;
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(struct iwl_scd_txq_cfg_cmd),
+ &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
+ return ret;
+ }
+ return 0;
}
/**
@ -816,7 +872,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
.data = { lq, },
};
- if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
return iwl_mvm_send_cmd(mvm, &cmd);
@ -1088,6 +1144,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
@ -1154,6 +1213,9 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
unsigned long now = jiffies;
int i;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)
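
The utils.c changes split queue management along the same new-TX-API line: TVQM-capable firmware hands the queue number back from iwl_trans_txq_alloc() and tears it down with iwl_trans_txq_free(), while older devices keep programming the scheduler through the SCD_QUEUE_CFG host command. A hedged sketch of how a caller can branch between the two entry points above (the wrapper name and plumbing are invented; the two signatures come from the hunks):

	static int sketch_alloc_txq(struct iwl_mvm *mvm, int mac80211_queue,
				    u8 sta_id, u8 tid, int fixed_queue, u16 ssn,
				    const struct iwl_trans_txq_scd_cfg *cfg,
				    unsigned int wdg_timeout)
	{
		if (iwl_mvm_has_new_tx_api(mvm))
			/* TVQM: firmware picks the queue, < 0 on failure */
			return iwl_mvm_tvqm_enable_txq(mvm, mac80211_queue,
						       sta_id, tid, wdg_timeout);

		/* pre-a000: program a fixed queue via SCD_QUEUE_CFG */
		iwl_mvm_enable_txq(mvm, fixed_queue, mac80211_queue, ssn, cfg,
				   wdg_timeout);
		return fixed_queue;
	}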

Some files were not shown because too many files have changed in this diff.