Merge tag 'wireless-drivers-next-for-davem-2016-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.10

Major changes:

rsi

* filter rx frames
* configure tx power
* make it possible to select antenna
* support 802.11d

brcmfmac

* cleanup of scheduled scan code
* support for bcm43341 chipset with different chip id
* support rev6 of PCIe device interface

ath10k

* add spectral scan support for QCA6174 and QCA9377 families
* show used tx bitrate with 10.4 firmware

wil6210

* add power save mode support
* add abort scan functionality
* add support for setting the retry limit for short frames

bcma

* add Dell Inspiron 3148
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit ab17cb1fea
Merged by: David S. Miller <davem@davemloft.net>
Date: 2016-12-02 13:58:10 -05:00
65 changed files with 2562 additions and 790 deletions

View File

@ -295,6 +295,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },

View File

@ -326,6 +326,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@ -1536,7 +1537,7 @@ static void ath10k_core_restart(struct work_struct *work)
switch (ar->state) {
case ATH10K_STATE_ON:
ar->state = ATH10K_STATE_RESTARTING;
ath10k_hif_stop(ar);
ath10k_halt(ar);
ath10k_scan_finish(ar);
ieee80211_restart_hw(ar->hw);
break;
@ -1857,7 +1858,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_wmi_detach;
}
status = ath10k_htt_tx_alloc(&ar->htt);
status = ath10k_htt_tx_start(&ar->htt);
if (status) {
ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
goto err_wmi_detach;
@ -2052,7 +2053,7 @@ void ath10k_core_stop(struct ath10k *ar)
ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
ath10k_hif_stop(ar);
ath10k_htt_tx_free(&ar->htt);
ath10k_htt_tx_stop(&ar->htt);
ath10k_htt_rx_free(&ar->htt);
ath10k_wmi_detach(ar);
}
@ -2385,6 +2386,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
ath10k_wmi_free_host_mem(ar);
ath10k_mac_destroy(ar);
}

View File

@ -337,6 +337,7 @@ struct ath10k_sta {
u32 nss;
u32 smps;
u16 peer_id;
struct rate_info txrate;
struct work_struct update_wk;
@ -562,6 +563,13 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
/* Firmware allow other BSS mesh broadcast/multicast frames without
* creating monitor interface. Appropriate rxfilters are programmed for
* mesh vdev by firmware itself. This feature flags will be used for
* not creating monitor vdev while configuring mesh node.
*/
ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST = 16,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@ -693,6 +701,21 @@ struct ath10k_fw_components {
struct ath10k_fw_file fw_file;
};
struct ath10k_per_peer_tx_stats {
u32 succ_bytes;
u32 retry_bytes;
u32 failed_bytes;
u8 ratecode;
u8 flags;
u16 peer_id;
u16 succ_pkts;
u16 retry_pkts;
u16 failed_pkts;
u16 duration;
u32 reserved1;
u32 reserved2;
};
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@ -906,6 +929,7 @@ struct ath10k {
struct ath10k_thermal thermal;
struct ath10k_wow wow;
struct ath10k_per_peer_tx_stats peer_tx_stats;
/* NAPI */
struct net_device napi_dev;

View File

@ -77,6 +77,19 @@ void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
if (!arsta->txrate.legacy && !arsta->txrate.nss)
return;
if (arsta->txrate.legacy) {
sinfo->txrate.legacy = arsta->txrate.legacy;
} else {
sinfo->txrate.mcs = arsta->txrate.mcs;
sinfo->txrate.nss = arsta->txrate.nss;
sinfo->txrate.bw = arsta->txrate.bw;
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
}
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,

View File

@ -137,6 +137,8 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
[HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] =
HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
[HTT_10_4_T2H_MSG_TYPE_PEER_STATS] =
HTT_T2H_MSG_TYPE_PEER_STATS,
};
int ath10k_htt_connect(struct ath10k_htt *htt)

View File

@ -419,6 +419,7 @@ enum htt_10_4_t2h_msg_type {
HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
/* 0x19 to 0x2f are reserved */
HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30,
HTT_10_4_T2H_MSG_TYPE_PEER_STATS = 0x31,
/* keep this last */
HTT_10_4_T2H_NUM_MSGS
};
@ -453,6 +454,7 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
HTT_T2H_MSG_TYPE_PEER_STATS,
/* keep this last */
HTT_T2H_NUM_MSGS
};
@ -1470,6 +1472,28 @@ struct htt_channel_change {
__le32 phymode;
} __packed;
struct htt_per_peer_tx_stats_ind {
__le32 succ_bytes;
__le32 retry_bytes;
__le32 failed_bytes;
u8 ratecode;
u8 flags;
__le16 peer_id;
__le16 succ_pkts;
__le16 retry_pkts;
__le16 failed_pkts;
__le16 tx_duration;
__le32 reserved1;
__le32 reserved2;
} __packed;
struct htt_peer_tx_stats {
u8 num_ppdu;
u8 ppdu_len;
u8 version;
u8 payload[0];
} __packed;
union htt_rx_pn_t {
/* WEP: 24-bit PN */
u32 pn24;
@ -1521,6 +1545,7 @@ struct htt_resp {
struct htt_tx_fetch_confirm tx_fetch_confirm;
struct htt_tx_mode_switch_ind tx_mode_switch_ind;
struct htt_channel_change chan_change;
struct htt_peer_tx_stats peer_tx_stats;
};
} __packed;
@ -1692,6 +1717,8 @@ struct ath10k_htt {
enum htt_tx_mode_switch_mode mode;
enum htt_q_depth_type type;
} tx_q_state;
bool tx_mem_allocated;
};
#define RX_HTT_HDR_STATUS_LEN 64
@ -1754,7 +1781,9 @@ int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);
int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
int ath10k_htt_tx_start(struct ath10k_htt *htt);
void ath10k_htt_tx_stop(struct ath10k_htt *htt);
void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt);
int ath10k_htt_rx_alloc(struct ath10k_htt *htt);

View File

@ -2194,6 +2194,128 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
static inline bool is_valid_legacy_rate(u8 rate)
{
static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
18, 24, 36, 48, 54};
int i;
for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
if (rate == legacy_rates[i])
return true;
}
return false;
}
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ieee80211_sta *sta,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
u8 rate = 0, sgi;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
txrate.bw = ATH10K_HW_BW(peer_stats->flags);
txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
sgi = ATH10K_HW_GI(peer_stats->flags);
if (((txrate.flags == WMI_RATE_PREAMBLE_HT) ||
(txrate.flags == WMI_RATE_PREAMBLE_VHT)) && txrate.mcs > 9) {
ath10k_warn(ar, "Invalid mcs %hhd peer stats", txrate.mcs);
return;
}
if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
if (!is_valid_legacy_rate(rate)) {
ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
rate);
return;
}
/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
rate *= 10;
if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
rate = rate - 5;
arsta->txrate.legacy = rate * 10;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
arsta->txrate.mcs = txrate.mcs;
} else {
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
arsta->txrate.mcs = txrate.mcs;
}
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
arsta->txrate.nss = txrate.nss;
arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20;
}
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
struct sk_buff *skb)
{
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
struct htt_per_peer_tx_stats_ind *tx_stats;
struct ieee80211_sta *sta;
struct ath10k_peer *peer;
int peer_id, i;
u8 ppdu_len, num_ppdu;
num_ppdu = resp->peer_tx_stats.num_ppdu;
ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
return;
}
tx_stats = (struct htt_per_peer_tx_stats_ind *)
(resp->peer_tx_stats.payload);
peer_id = __le16_to_cpu(tx_stats->peer_id);
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
peer_id);
goto out;
}
sta = peer->sta;
for (i = 0; i < num_ppdu; i++) {
tx_stats = (struct htt_per_peer_tx_stats_ind *)
(resp->peer_tx_stats.payload + i * ppdu_len);
p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
p_tx_stats->failed_bytes =
__le32_to_cpu(tx_stats->failed_bytes);
p_tx_stats->ratecode = tx_stats->ratecode;
p_tx_stats->flags = tx_stats->flags;
p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
}
out:
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@ -2354,6 +2476,9 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
break;
case HTT_T2H_MSG_TYPE_PEER_STATS:
ath10k_htt_fetch_peer_stats(ar, skb);
break;
case HTT_T2H_MSG_TYPE_EN_STATS:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",

View File

@ -350,21 +350,15 @@ static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
return ret;
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
spin_lock_init(&htt->tx_lock);
idr_init(&htt->pending_tx);
ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
goto free_idr_pending_tx;
return ret;
}
ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
@ -396,6 +390,31 @@ free_frag_desc:
free_txbuf:
ath10k_htt_tx_free_cont_txbuf(htt);
return ret;
}
int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
spin_lock_init(&htt->tx_lock);
idr_init(&htt->pending_tx);
if (htt->tx_mem_allocated)
return 0;
ret = ath10k_htt_tx_alloc_buf(htt);
if (ret)
goto free_idr_pending_tx;
htt->tx_mem_allocated = true;
return 0;
free_idr_pending_tx:
idr_destroy(&htt->pending_tx);
@ -418,15 +437,28 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
return 0;
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
if (!htt->tx_mem_allocated)
return;
ath10k_htt_tx_free_cont_txbuf(htt);
ath10k_htt_tx_free_txq(htt);
ath10k_htt_tx_free_cont_frag_desc(htt);
ath10k_htt_tx_free_txdone_fifo(htt);
htt->tx_mem_allocated = false;
}
void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
ath10k_htt_tx_stop(htt);
ath10k_htt_tx_destroy(htt);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
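
Editor's note, not part of the commit: the hunks above split the old ath10k_htt_tx_alloc()/ath10k_htt_tx_free() pair into ath10k_htt_tx_start()/ath10k_htt_tx_stop(), which run on every firmware start/stop, plus ath10k_htt_tx_destroy(), which frees memory only once at teardown; the new tx_mem_allocated flag lets the HTT TX DMA buffers survive firmware restarts. A minimal userspace model of that guard (illustrative names, not driver code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_htt {
        void *txbuf;              /* stands in for the driver's DMA buffers */
        bool tx_mem_allocated;    /* same role as the new flag in struct ath10k_htt */
    };

    static int toy_htt_tx_start(struct toy_htt *htt)
    {
        if (htt->tx_mem_allocated)
            return 0;             /* restart path: reuse existing buffers */
        htt->txbuf = malloc(4096);
        if (!htt->txbuf)
            return -1;
        htt->tx_mem_allocated = true;
        printf("buffers allocated\n");
        return 0;
    }

    static void toy_htt_tx_destroy(struct toy_htt *htt)
    {
        if (!htt->tx_mem_allocated)
            return;               /* nothing was ever allocated */
        free(htt->txbuf);
        htt->tx_mem_allocated = false;
        printf("buffers freed\n");
    }

    int main(void)
    {
        struct toy_htt htt = { 0 };

        toy_htt_tx_start(&htt);   /* first start: allocates */
        toy_htt_tx_start(&htt);   /* simulated firmware restart: no reallocation */
        toy_htt_tx_destroy(&htt); /* teardown: single free */
        return 0;
    }

Calling the start routine twice allocates only once, mirroring how ath10k_core_start() can now be re-entered after a firmware restart without reallocating HTT TX memory.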

View File

@ -1167,7 +1167,9 @@ static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
return false;
return ar->monitor ||
ar->filter_flags & FIF_OTHER_BSS ||
(!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
ar->running_fw->fw_file.fw_features) &&
(ar->filter_flags & FIF_OTHER_BSS)) ||
test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}
@ -4449,7 +4451,6 @@ static int ath10k_start(struct ieee80211_hw *hw)
ar->state = ATH10K_STATE_ON;
break;
case ATH10K_STATE_RESTARTING:
ath10k_halt(ar);
ar->state = ATH10K_STATE_RESTARTED;
break;
case ATH10K_STATE_ON:
@ -6976,40 +6977,28 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &arsta->update_wk);
}
static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
/*
* FIXME: Return 0 for time being. Need to figure out whether FW
* has the API to fetch 64-bit local TSF
*/
return 0;
}
static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 tsf)
static void ath10k_offset_tsf(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, s64 tsf_offset)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
u32 offset, vdev_param;
int ret;
/* Workaround:
*
* Given tsf argument is entire TSF value, but firmware accepts
* only TSF offset to current TSF.
*
* get_tsf function is used to get offset value, however since
* ath10k_get_tsf is not implemented properly, it will return 0 always.
* Luckily all the caller functions to set_tsf, as of now, also rely on
* get_tsf function to get entire tsf value such get_tsf() + tsf_delta,
* final tsf offset value to firmware will be arithmetically correct.
*/
tsf_offset = tsf - ath10k_get_tsf(hw, vif);
if (tsf_offset < 0) {
vdev_param = ar->wmi.vdev_param->dec_tsf;
offset = -tsf_offset;
} else {
vdev_param = ar->wmi.vdev_param->inc_tsf;
offset = tsf_offset;
}
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, tsf_offset);
vdev_param, offset);
if (ret && ret != -EOPNOTSUPP)
ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
offset, vdev_param, ret);
}
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
@ -7474,8 +7463,7 @@ static const struct ieee80211_ops ath10k_ops = {
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
.set_tsf = ath10k_set_tsf,
.offset_tsf = ath10k_offset_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
@ -8006,6 +7994,7 @@ int ath10k_mac_register(struct ath10k *ar)
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

View File

@ -660,6 +660,9 @@ ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_vdev_spectral_conf)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
@ -675,6 +678,9 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_vdev_spectral_enable)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
enable);
if (IS_ERR(skb))

View File

@ -1313,8 +1313,8 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
cmd->regd = __cpu_to_le32(rd);
cmd->regd_2ghz = __cpu_to_le32(rd2g);
cmd->regd_5ghz = __cpu_to_le32(rd5g);
cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
return skb;
@ -3136,6 +3136,76 @@ ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
const struct wmi_vdev_spectral_conf_arg *arg)
{
struct wmi_vdev_spectral_conf_cmd *cmd;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
cmd->scan_count = __cpu_to_le32(arg->scan_count);
cmd->scan_period = __cpu_to_le32(arg->scan_period);
cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
u32 trigger, u32 enable)
{
struct wmi_vdev_spectral_enable_cmd *cmd;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->trigger_cmd = __cpu_to_le32(trigger);
cmd->enable_cmd = __cpu_to_le32(enable);
return skb;
}
/****************/
/* TLV mappings */
/****************/
@ -3464,7 +3534,6 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {
@ -3542,6 +3611,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {

View File

@ -785,7 +785,6 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@ -861,7 +860,6 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@ -936,7 +934,6 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@ -1012,7 +1009,8 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
.set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@ -4489,7 +4487,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
if (!num_units)
return -ENOMEM;
paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_TO_DEVICE);
paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(ar->dev, paddr)) {
kfree(vaddr);
return -ENOMEM;

View File

@ -4603,9 +4603,17 @@ enum wmi_rate_preamble {
#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3))
#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3)
#define ATH10K_HW_RATECODE(rate, nss, preamble) \
#define ATH10K_HW_MCS_RATE(rate) ((rate) & 0xf)
#define ATH10K_HW_LEGACY_RATE(rate) ((rate) & 0x3f)
#define ATH10K_HW_BW(flags) (((flags) >> 3) & 0x3)
#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1)
#define ATH10K_HW_RATECODE(rate, nss, preamble) \
(((preamble) << 6) | ((nss) << 4) | (rate))
#define VHT_MCS_NUM 10
#define VHT_BW_NUM 4
#define VHT_NSS_NUM 4
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@ -4676,7 +4684,8 @@ struct wmi_vdev_param_map {
u32 meru_vc;
u32 rx_decap_type;
u32 bw_nss_ratemask;
u32 set_tsf;
u32 inc_tsf;
u32 dec_tsf;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@ -5009,6 +5018,11 @@ enum wmi_10_4_vdev_param {
WMI_10_4_VDEV_PARAM_STA_KICKOUT,
WMI_10_4_VDEV_PARAM_CAPABILITIES,
WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
WMI_10_4_VDEV_PARAM_RX_FILTER,
WMI_10_4_VDEV_PARAM_MGMT_TX_POWER,
WMI_10_4_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
WMI_10_4_VDEV_PARAM_DISABLE_DYN_BW_RTS,
WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
};
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
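
Editor's note, not part of the commit: the ATH10K_HW_MCS_RATE/LEGACY_RATE/BW/GI helpers added above, together with the existing NSS and PREAMBLE macros, unpack the firmware rate code consumed by the new peer-stats path in htt_rx.c. A standalone sketch of the decoding; the preamble value 3 assumes the driver's wmi_rate_preamble ordering (OFDM=0, CCK=1, HT=2, VHT=3):

    #include <stdio.h>

    /* Macro bodies copied from the hunk above. */
    #define ATH10K_HW_MCS_RATE(rate)    ((rate) & 0xf)
    #define ATH10K_HW_LEGACY_RATE(rate) ((rate) & 0x3f)
    #define ATH10K_HW_NSS(rate)         (1 + (((rate) >> 4) & 0x3))
    #define ATH10K_HW_PREAMBLE(rate)    (((rate) >> 6) & 0x3)
    #define ATH10K_HW_BW(flags)         (((flags) >> 3) & 0x3)
    #define ATH10K_HW_GI(flags)         (((flags) >> 5) & 0x1)
    #define ATH10K_HW_RATECODE(rate, nss, preamble) \
        (((preamble) << 6) | ((nss) << 4) | (rate))

    int main(void)
    {
        /* VHT (assumed preamble value 3), NSS index 1 (= 2 streams), MCS 7 */
        unsigned int rc = ATH10K_HW_RATECODE(7, 1, 3);
        /* flags with bandwidth index 1 and short GI set, per the BW/GI bit positions */
        unsigned int flags = (1u << 3) | (1u << 5);

        printf("preamble=%u nss=%u mcs=%u bw_idx=%u sgi=%u\n",
               ATH10K_HW_PREAMBLE(rc), ATH10K_HW_NSS(rc),
               ATH10K_HW_MCS_RATE(rc), ATH10K_HW_BW(flags),
               ATH10K_HW_GI(flags));
        /* prints: preamble=3 nss=2 mcs=7 bw_idx=1 sgi=1 */
        return 0;
    }

ath10k_update_per_peer_tx_stats() performs this same unpacking before filling the struct rate_info that ath10k_sta_statistics() later reports as the tx bitrate.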

View File

@ -66,7 +66,6 @@
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "ath5k.h"
#include "reg.h"

View File

@ -62,7 +62,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
return false;
}
static struct ath_bus_ops ath_ahb_bus_ops = {
static const struct ath_bus_ops ath_ahb_bus_ops = {
.ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,

View File

@ -528,6 +528,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
if (!spec_priv->rfs_chan_spec_scan)
return 1;
/* Output buffers are full, no need to process anything
* since there is no space to put the result anyway
*/
@ -1072,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
@ -1086,6 +1089,9 @@ void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
debugfs_phy,
1024, 256, &rfs_spec_scan_cb,
NULL);
if (!spec_priv->rfs_chan_spec_scan)
return;
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
debugfs_phy, spec_priv,

View File

@ -997,7 +997,8 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
err = usb_control_msg(hif_dev->udev,
usb_sndctrlpipe(hif_dev->udev, 0),
FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
addr >> 8, 0, buf, transfer, HZ);
addr >> 8, 0, buf, transfer,
USB_MSG_TIMEOUT);
if (err < 0) {
kfree(buf);
return err;
@ -1020,7 +1021,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
FIRMWARE_DOWNLOAD_COMP,
0x40 | USB_DIR_OUT,
firm_offset >> 8, 0, NULL, 0, HZ);
firm_offset >> 8, 0, NULL, 0, USB_MSG_TIMEOUT);
if (err)
return -EIO;
@ -1249,7 +1250,7 @@ static int send_eject_command(struct usb_interface *interface)
dev_info(&udev->dev, "Ejecting storage device...\n");
r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
cmd, 31, NULL, 2000);
cmd, 31, NULL, 2 * USB_MSG_TIMEOUT);
kfree(cmd);
if (r)
return r;
@ -1314,7 +1315,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
return;
ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
buf, 4, NULL, HZ);
buf, 4, NULL, USB_MSG_TIMEOUT);
if (ret)
dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");

View File

@ -71,6 +71,8 @@ extern int htc_use_dev_fw;
#define USB_REG_IN_PIPE 3
#define USB_REG_OUT_PIPE 4
#define USB_MSG_TIMEOUT 1000 /* (ms) */
#define HIF_USB_MAX_RXPIPES 2
#define HIF_USB_MAX_TXPIPES 4

View File

@ -2792,7 +2792,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
WARN_ON(1);
}
return val;
return !!val;
}
EXPORT_SYMBOL(ath9k_hw_gpio_get);

View File

@ -867,10 +867,21 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* can be dropped.
*/
if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
/*
* DFS and spectral are mutually exclusive
*
* Since some chips use PHYERR_RADAR as indication for both, we
* need to double check which feature is enabled to prevent
* feeding spectral or dfs-detector with wrong frames.
*/
if (hw->conf.radar_enabled) {
ath9k_dfs_process_phyerr(sc, hdr, rx_stats,
rx_status->mactime);
} else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
rx_status->mactime)) {
RX_STAT_INC(rx_spectral);
}
return -EINVAL;
}

View File

@ -354,14 +354,6 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
mutex_unlock(&wil->p2p_wdev_mutex);
return -EAGAIN;
}
mutex_unlock(&wil->p2p_wdev_mutex);
/* check we are client side */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@ -378,12 +370,24 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
mutex_lock(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request || wil->p2p.discovery_started) {
wil_err(wil, "Already scanning\n");
mutex_unlock(&wil->p2p_wdev_mutex);
rc = -EAGAIN;
goto out;
}
mutex_unlock(&wil->p2p_wdev_mutex);
/* social scan on P2P_DEVICE is handled as p2p search */
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
wil_p2p_is_social_scan(request)) {
if (!wil->p2p.p2p_dev_started) {
wil_err(wil, "P2P search requested on stopped P2P device\n");
return -EIO;
rc = -EIO;
goto out;
}
wil->scan_request = request;
wil->radio_wdev = wdev;
@ -392,7 +396,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
return rc;
goto out;
}
(void)wil_p2p_stop_discovery(wil);
@ -415,7 +419,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
if (rc) {
wil_err(wil, "set SSID for scan request failed: %d\n", rc);
return rc;
goto out;
}
wil->scan_request = request;
@ -448,7 +452,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
if (rc)
goto out;
goto out_restore;
if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
cmd.cmd.discovery_mode = 1;
@ -459,16 +463,45 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
out:
out_restore:
if (rc) {
del_timer_sync(&wil->scan_timer);
wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
out:
mutex_unlock(&wil->mutex);
return rc;
}
static void wil_cfg80211_abort_scan(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
mutex_lock(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
if (!wil->scan_request)
goto out;
if (wdev != wil->scan_request->wdev) {
wil_dbg_misc(wil, "abort scan was called on the wrong iface\n");
goto out;
}
if (wil->radio_wdev == wil->p2p_wdev)
wil_p2p_stop_radio_operations(wil);
else
wil_abort_scan(wil, true);
out:
mutex_unlock(&wil->p2p_wdev_mutex);
mutex_unlock(&wil->mutex);
}
static void wil_print_crypto(struct wil6210_priv *wil,
struct cfg80211_crypto_settings *c)
{
@ -674,6 +707,26 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
return rc;
}
static int wil_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
/* these parameters are explicitly not supported */
if (changed & (WIPHY_PARAM_RETRY_LONG |
WIPHY_PARAM_FRAG_THRESHOLD |
WIPHY_PARAM_RTS_THRESHOLD))
return -ENOTSUPP;
if (changed & WIPHY_PARAM_RETRY_SHORT) {
rc = wmi_set_mgmt_retry(wil, wiphy->retry_short);
if (rc)
return rc;
}
return 0;
}
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie)
@ -940,16 +993,8 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
__func__, chan->center_freq, duration, wdev->iftype);
rc = wil_p2p_listen(wil, duration, chan, cookie);
if (rc)
return rc;
wil->radio_wdev = wdev;
cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
GFP_KERNEL);
return 0;
rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
return rc;
}
static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
@ -1419,17 +1464,49 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s: entered\n", __func__);
mutex_lock(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
wil_p2p_stop_radio_operations(wil);
p2p->p2p_dev_started = 0;
mutex_unlock(&wil->p2p_wdev_mutex);
mutex_unlock(&wil->mutex);
}
static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
struct net_device *dev,
bool enabled, int timeout)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_ps_profile_type ps_profile;
int rc;
if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
wil_err(wil, "set_power_mgmt not supported\n");
return -EOPNOTSUPP;
}
wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
enabled, timeout);
if (enabled)
ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
else
ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
if (rc)
wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
return rc;
}
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
.scan = wil_cfg80211_scan,
.abort_scan = wil_cfg80211_abort_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
.set_wiphy_params = wil_cfg80211_set_wiphy_params,
.change_virtual_intf = wil_cfg80211_change_iface,
.get_station = wil_cfg80211_get_station,
.dump_station = wil_cfg80211_dump_station,
@ -1450,6 +1527,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
/* P2P device */
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@ -1466,7 +1544,8 @@ static void wil_wiphy_init(struct wiphy *wiphy)
BIT(NL80211_IFTYPE_MONITOR);
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_PS_ON_BY_DEFAULT;
dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
__func__, wiphy->flags);
wiphy->probe_resp_offload =

View File

@ -24,6 +24,7 @@
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
@ -213,7 +214,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(&sta->stats, 0, sizeof(sta->stats));
}
static bool wil_ap_is_connected(struct wil6210_priv *wil)
static bool wil_is_connected(struct wil6210_priv *wil)
{
int i;
@ -267,7 +268,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
wil_bcast_fini(wil);
netif_tx_stop_all_queues(ndev);
wil_update_net_queues_bh(wil, NULL, true);
netif_carrier_off(ndev);
if (test_bit(wil_status_fwconnected, wil->status)) {
@ -283,8 +284,12 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (!wil_ap_is_connected(wil))
if (!wil_is_connected(wil)) {
wil_update_net_queues_bh(wil, NULL, true);
clear_bit(wil_status_fwconnected, wil->status);
} else {
wil_update_net_queues_bh(wil, NULL, false);
}
break;
default:
break;
@ -384,18 +389,19 @@ static void wil_fw_error_worker(struct work_struct *work)
wil->last_fw_recovery = jiffies;
wil_info(wil, "fw error recovery requested (try %d)...\n",
wil->recovery_count);
if (!no_fw_recovery)
wil->recovery_state = fw_recovery_running;
if (wil_wait_for_recovery(wil) != 0)
return;
mutex_lock(&wil->mutex);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
wil_info(wil, "fw error recovery requested (try %d)...\n",
wil->recovery_count);
if (!no_fw_recovery)
wil->recovery_state = fw_recovery_running;
if (0 != wil_wait_for_recovery(wil))
break;
/* silent recovery, upper layers will see disconnect */
__wil_down(wil);
__wil_up(wil);
break;
@ -512,10 +518,13 @@ int wil_priv_init(struct wil6210_priv *wil)
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
INIT_WORK(&wil->p2p.delayed_listen_work, wil_p2p_delayed_listen_work);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
INIT_LIST_HEAD(&wil->probe_client_pending);
spin_lock_init(&wil->wmi_ev_lock);
spin_lock_init(&wil->net_queue_lock);
wil->net_queue_stopped = 1;
init_waitqueue_head(&wil->wq);
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
@ -571,6 +580,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
cancel_work_sync(&wil->p2p.discovery_expired_work);
cancel_work_sync(&wil->p2p.delayed_listen_work);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
mutex_unlock(&wil->mutex);
@ -685,6 +695,19 @@ static int wil_target_reset(struct wil6210_priv *wil)
return 0;
}
static void wil_collect_fw_info(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
u8 retry_short;
int rc;
rc = wmi_get_mgmt_retry(wil, &retry_short);
if (!rc) {
wiphy->retry_short = retry_short;
wil_dbg_misc(wil, "FW retry_short: %d\n", retry_short);
}
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{
le32_to_cpus(&r->base);
@ -801,6 +824,34 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
return 0;
}
void wil_abort_scan(struct wil6210_priv *wil, bool sync)
{
int rc;
struct cfg80211_scan_info info = {
.aborted = true,
};
lockdep_assert_held(&wil->p2p_wdev_mutex);
if (!wil->scan_request)
return;
wil_dbg_misc(wil, "Abort scan_request 0x%p\n", wil->scan_request);
del_timer_sync(&wil->scan_timer);
mutex_unlock(&wil->p2p_wdev_mutex);
rc = wmi_abort_scan(wil);
if (!rc && sync)
wait_event_interruptible_timeout(wil->wq, !wil->scan_request,
msecs_to_jiffies(
WAIT_FOR_SCAN_ABORT_MS));
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
}
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@ -853,17 +904,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
mutex_unlock(&wil->wmi_mutex);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
};
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
wil_mask_irq(wil);
@ -940,6 +981,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
wil_collect_fw_info(wil);
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
@ -1056,20 +1099,9 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
wil_p2p_stop_radio_operations(wil);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
};
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
wil_p2p_stop_radio_operations(wil);
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
wil_reset(wil, false);

View File

@ -214,7 +214,7 @@ int wil_if_add(struct wil6210_priv *wil)
netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
netif_tx_stop_all_queues(ndev);
wil_update_net_queues_bh(wil, NULL, true);
rc = register_netdev(ndev);
if (rc < 0) {

View File

@ -22,6 +22,43 @@
#define P2P_SEARCH_DURATION_MS 500
#define P2P_DEFAULT_BI 100
static int wil_p2p_start_listen(struct wil6210_priv *wil)
{
struct wil_p2p_info *p2p = &wil->p2p;
u8 channel = p2p->listen_chan.hw_value;
int rc;
lockdep_assert_held(&wil->mutex);
rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
if (rc) {
wil_err(wil, "wmi_p2p_cfg failed\n");
goto out;
}
rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
if (rc) {
wil_err(wil, "wmi_set_ssid failed\n");
goto out_stop;
}
rc = wmi_start_listen(wil);
if (rc) {
wil_err(wil, "wmi_start_listen failed\n");
goto out_stop;
}
INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
mod_timer(&p2p->discovery_timer,
jiffies + msecs_to_jiffies(p2p->listen_duration));
out_stop:
if (rc)
wmi_stop_discovery(wil);
out:
return rc;
}
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
{
return (request->n_channels == 1) &&
@ -46,7 +83,7 @@ int wil_p2p_search(struct wil6210_priv *wil,
wil_dbg_misc(wil, "%s: channel %d\n",
__func__, P2P_DMG_SOCIAL_CHANNEL);
mutex_lock(&wil->mutex);
lockdep_assert_held(&wil->mutex);
if (p2p->discovery_started) {
wil_err(wil, "%s: search failed. discovery already ongoing\n",
@ -103,22 +140,19 @@ out_stop:
wmi_stop_discovery(wil);
out:
mutex_unlock(&wil->mutex);
return rc;
}
int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
struct ieee80211_channel *chan, u64 *cookie)
int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
unsigned int duration, struct ieee80211_channel *chan,
u64 *cookie)
{
struct wil_p2p_info *p2p = &wil->p2p;
u8 channel = P2P_DMG_SOCIAL_CHANNEL;
int rc;
if (!chan)
return -EINVAL;
channel = chan->hw_value;
wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
mutex_lock(&wil->mutex);
@ -129,35 +163,30 @@ int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
goto out;
}
rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
if (rc) {
wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
goto out;
}
rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
if (rc) {
wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
goto out_stop;
}
rc = wmi_start_listen(wil);
if (rc) {
wil_err(wil, "%s: wmi_start_listen failed\n", __func__);
goto out_stop;
}
memcpy(&p2p->listen_chan, chan, sizeof(*chan));
*cookie = ++p2p->cookie;
p2p->listen_duration = duration;
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_dbg_misc(wil, "Delaying p2p listen until scan done\n");
p2p->pending_listen_wdev = wdev;
p2p->discovery_started = 1;
rc = 0;
mutex_unlock(&wil->p2p_wdev_mutex);
goto out;
}
mutex_unlock(&wil->p2p_wdev_mutex);
rc = wil_p2p_start_listen(wil);
if (rc)
goto out;
p2p->discovery_started = 1;
INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
mod_timer(&p2p->discovery_timer,
jiffies + msecs_to_jiffies(duration));
wil->radio_wdev = wdev;
out_stop:
if (rc)
wmi_stop_discovery(wil);
cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
GFP_KERNEL);
out:
mutex_unlock(&wil->mutex);
@ -170,9 +199,14 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil)
u8 started = p2p->discovery_started;
if (p2p->discovery_started) {
del_timer_sync(&p2p->discovery_timer);
if (p2p->pending_listen_wdev) {
/* discovery not really started, only pending */
p2p->pending_listen_wdev = NULL;
} else {
del_timer_sync(&p2p->discovery_timer);
wmi_stop_discovery(wil);
}
p2p->discovery_started = 0;
wmi_stop_discovery(wil);
}
return started;
@ -257,13 +291,59 @@ void wil_p2p_search_expired(struct work_struct *work)
};
mutex_lock(&wil->p2p_wdev_mutex);
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
wil->radio_wdev = wil->wdev;
if (wil->scan_request) {
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
wil->radio_wdev = wil->wdev;
}
mutex_unlock(&wil->p2p_wdev_mutex);
}
}
void wil_p2p_delayed_listen_work(struct work_struct *work)
{
struct wil_p2p_info *p2p = container_of(work,
struct wil_p2p_info, delayed_listen_work);
struct wil6210_priv *wil = container_of(p2p,
struct wil6210_priv, p2p);
int rc;
mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "Checking delayed p2p listen\n");
if (!p2p->discovery_started || !p2p->pending_listen_wdev)
goto out;
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
/* another scan started, wait again... */
mutex_unlock(&wil->p2p_wdev_mutex);
goto out;
}
mutex_unlock(&wil->p2p_wdev_mutex);
rc = wil_p2p_start_listen(wil);
mutex_lock(&wil->p2p_wdev_mutex);
if (rc) {
cfg80211_remain_on_channel_expired(p2p->pending_listen_wdev,
p2p->cookie,
&p2p->listen_chan,
GFP_KERNEL);
wil->radio_wdev = wil->wdev;
} else {
cfg80211_ready_on_channel(p2p->pending_listen_wdev, p2p->cookie,
&p2p->listen_chan,
p2p->listen_duration, GFP_KERNEL);
wil->radio_wdev = p2p->pending_listen_wdev;
}
p2p->pending_listen_wdev = NULL;
mutex_unlock(&wil->p2p_wdev_mutex);
out:
mutex_unlock(&wil->mutex);
}
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
{
struct wil_p2p_info *p2p = &wil->p2p;
@ -272,8 +352,7 @@ void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
};
lockdep_assert_held(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
lockdep_assert_held(&wil->p2p_wdev_mutex);
if (wil->radio_wdev != wil->p2p_wdev)
goto out;
@ -281,10 +360,8 @@ void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
if (!p2p->discovery_started) {
/* Regular scan on the p2p device */
if (wil->scan_request &&
wil->scan_request->wdev == wil->p2p_wdev) {
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
wil->scan_request->wdev == wil->p2p_wdev)
wil_abort_scan(wil, true);
goto out;
}
@ -307,5 +384,4 @@ void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
out:
wil->radio_wdev = wil->wdev;
mutex_unlock(&wil->p2p_wdev_mutex);
}

View File

@ -54,6 +54,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wmi_pmc_cmd pmc_cmd = {0};
int last_cmd_err = -ENOMEM;
mutex_lock(&pmc->lock);
@ -62,6 +63,29 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
goto no_release_err;
}
if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
wil_err(wil,
"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
num_descriptors, descriptor_size);
last_cmd_err = -EINVAL;
goto no_release_err;
}
if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
wil_err(wil,
"num_descriptors(%d) exceeds max ring size %d\n",
num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
last_cmd_err = -EINVAL;
goto no_release_err;
}
if (num_descriptors > INT_MAX / descriptor_size) {
wil_err(wil,
"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
num_descriptors, descriptor_size);
last_cmd_err = -EINVAL;
goto no_release_err;
}
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
@ -189,7 +213,7 @@ release_pmc_skb_list:
pmc->descriptors = NULL;
no_release_err:
pmc->last_cmd_status = -ENOMEM;
pmc->last_cmd_status = last_cmd_err;
mutex_unlock(&pmc->lock);
}
@ -295,7 +319,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
size_t retval = 0;
unsigned long long idx;
loff_t offset;
size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
size_t pmc_size;
mutex_lock(&pmc->lock);
@ -306,6 +330,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
return -EPERM;
}
pmc_size = pmc->descriptor_size * pmc->num_descriptors;
wil_dbg_misc(wil,
"%s: size %u, pos %lld\n",
__func__, (unsigned)count, *f_pos);
@ -345,7 +371,18 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
loff_t newpos;
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
size_t pmc_size;
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
wil_err(wil, "error, pmc is not allocated!\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
}
pmc_size = pmc->descriptor_size * pmc->num_descriptors;
switch (whence) {
case 0: /* SEEK_SET */
@ -361,15 +398,21 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
break;
default: /* can't happen */
return -EINVAL;
newpos = -EINVAL;
goto out;
}
if (newpos < 0)
return -EINVAL;
if (newpos < 0) {
newpos = -EINVAL;
goto out;
}
if (newpos > pmc_size)
newpos = pmc_size;
filp->f_pos = newpos;
out:
mutex_unlock(&pmc->lock);
return newpos;
}

View File

@ -88,6 +88,18 @@ static inline int wil_vring_wmark_high(struct vring *vring)
return vring->size/4;
}
/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_vring_avail_low(struct vring *vring)
{
return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
}
/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_vring_avail_high(struct vring *vring)
{
return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
@ -1780,6 +1792,89 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
return rc;
}
/**
* Check status of tx vrings and stop/wake net queues if needed
*
* This function does one of two checks:
* In case check_stop is true, will check if net queues need to be stopped. If
* the conditions for stopping are met, netif_tx_stop_all_queues() is called.
* In case check_stop is false, will check if net queues need to be waked. If
* the conditions for waking are met, netif_tx_wake_all_queues() is called.
* vring is the vring which is currently being modified by either adding
* descriptors (tx) into it or removing descriptors (tx complete) from it. Can
* be null when irrelevant (e.g. connect/disconnect events).
*
* The implementation is to stop net queues if modified vring has low
* descriptor availability. Wake if all vrings are not in low descriptor
* availability and modified vring has high descriptor availability.
*/
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
struct vring *vring,
bool check_stop)
{
int i;
if (vring)
wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d",
(int)(vring - wil->vring_tx), check_stop,
wil->net_queue_stopped);
else
wil_dbg_txrx(wil, "check_stop=%d, stopped=%d",
check_stop, wil->net_queue_stopped);
if (check_stop == wil->net_queue_stopped)
/* net queues already in desired state */
return;
if (check_stop) {
if (!vring || unlikely(wil_vring_avail_low(vring))) {
/* not enough room in the vring */
netif_tx_stop_all_queues(wil_to_ndev(wil));
wil->net_queue_stopped = true;
wil_dbg_txrx(wil, "netif_tx_stop called\n");
}
return;
}
/* check wake */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *cur_vring = &wil->vring_tx[i];
struct vring_tx_data *txdata = &wil->vring_tx_data[i];
if (!cur_vring->va || !txdata->enabled || cur_vring == vring)
continue;
if (wil_vring_avail_low(cur_vring)) {
wil_dbg_txrx(wil, "vring %d full, can't wake\n",
(int)(cur_vring - wil->vring_tx));
return;
}
}
if (!vring || wil_vring_avail_high(vring)) {
/* enough room in the vring */
wil_dbg_txrx(wil, "calling netif_tx_wake\n");
netif_tx_wake_all_queues(wil_to_ndev(wil));
wil->net_queue_stopped = false;
}
}
void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
bool check_stop)
{
spin_lock(&wil->net_queue_lock);
__wil_update_net_queues(wil, vring, check_stop);
spin_unlock(&wil->net_queue_lock);
}
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
bool check_stop)
{
spin_lock_bh(&wil->net_queue_lock);
__wil_update_net_queues(wil, vring, check_stop);
spin_unlock_bh(&wil->net_queue_lock);
}
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
@ -1822,14 +1917,10 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
/* do we still have enough room in the vring? */
if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
netif_tx_stop_all_queues(wil_to_ndev(wil));
wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
}
switch (rc) {
case 0:
/* shall we stop net queues? */
wil_update_net_queues_bh(wil, vring, true);
/* statistics will be updated on the tx_complete */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@ -1978,10 +2069,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
txdata->last_idle = get_cycles();
}
if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
netif_tx_wake_all_queues(wil_to_ndev(wil));
}
/* shall we wake net queues? */
if (done)
wil_update_net_queues(wil, vring, false);
return done;
}
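
Editor's note, not part of the commit: the stop/wake policy documented in the __wil_update_net_queues() comment above is a two-watermark hysteresis. A small standalone model (placeholder watermark ratios, and without the driver's extra check that no other tx vring is low before waking):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE  128
    #define WMARK_LOW  (RING_SIZE / 8)   /* placeholder ratio */
    #define WMARK_HIGH (RING_SIZE / 4)   /* placeholder ratio */

    static bool queue_stopped;

    static void update_queues(int avail, bool check_stop)
    {
        if (check_stop == queue_stopped)
            return;                      /* already in the desired state */

        if (check_stop && avail < WMARK_LOW) {
            queue_stopped = true;        /* netif_tx_stop_all_queues() in the driver */
            printf("stop at avail=%d\n", avail);
        } else if (!check_stop && avail > WMARK_HIGH) {
            queue_stopped = false;       /* netif_tx_wake_all_queues() in the driver */
            printf("wake at avail=%d\n", avail);
        }
    }

    int main(void)
    {
        update_queues(10, true);   /* tx path: ring nearly full -> stop */
        update_queues(20, false);  /* tx complete: still below high mark -> stay stopped */
        update_queues(40, false);  /* tx complete: above high mark -> wake */
        return 0;
    }

The gap between the two watermarks keeps the net queues from flapping between stopped and woken on every completed descriptor.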

View File

@ -276,10 +276,11 @@ struct fw_map {
u32 to; /* linker address - to, exclusive */
u32 host; /* PCI/Host address - BAR0 + 0x880000 */
const char *name; /* for debugfs */
bool fw; /* true if FW mapping, false if UCODE mapping */
};
/* array size should be in sync with actual definition in the wmi.c */
extern const struct fw_map fw_mapping[8];
extern const struct fw_map fw_mapping[10];
/**
* mk_cidxtid - construct @cidxtid field
@ -461,8 +462,11 @@ struct wil_p2p_info {
u8 discovery_started;
u8 p2p_dev_started;
u64 cookie;
struct wireless_dev *pending_listen_wdev;
unsigned int listen_duration;
struct timer_list discovery_timer; /* listen/search duration */
struct work_struct discovery_expired_work; /* listen/search expire */
struct work_struct delayed_listen_work; /* listen after scan done */
};
enum wil_sta_status {
@ -624,6 +628,8 @@ struct wil6210_priv {
* - consumed in thread by wmi_event_worker
*/
spinlock_t wmi_ev_lock;
spinlock_t net_queue_lock; /* guarding stop/wake netif queue */
int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
struct napi_struct napi_rx;
struct napi_struct napi_tx;
/* keep alive */
@ -817,6 +823,10 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile);
int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short);
int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short);
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
@ -837,13 +847,15 @@ bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
void wil_p2p_discovery_timer_fn(ulong x);
int wil_p2p_search(struct wil6210_priv *wil,
struct cfg80211_scan_request *request);
int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
struct ieee80211_channel *chan, u64 *cookie);
int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
unsigned int duration, struct ieee80211_channel *chan,
u64 *cookie);
u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
void wil_p2p_listen_expired(struct work_struct *work);
void wil_p2p_search_expired(struct work_struct *work);
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
void wil_p2p_delayed_listen_work(struct work_struct *work);
/* WMI for P2P */
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
@ -869,6 +881,9 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
u8 chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_priv *wil);
int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
int wmi_abort_scan(struct wil6210_priv *wil);
void wil_abort_scan(struct wil6210_priv *wil, bool sync);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
@ -886,6 +901,10 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
int wil_bcast_init(struct wil6210_priv *wil);
void wil_bcast_fini(struct wil6210_priv *wil);
void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
bool should_stop);
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
bool check_stop);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_priv *wil, int ringid);
void wil6210_unmask_irq_tx(struct wil6210_priv *wil);

View File

@ -36,6 +36,9 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->fw)
continue;
if (map->host < host_min)
host_min = map->host;
@ -73,6 +76,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->fw)
continue;
data = (void * __force)wil->csr + HOSTADDR(map->host);
len = map->to - map->from;
offset = map->host - host_min;

View File

@ -84,19 +84,29 @@ MODULE_PARM_DESC(led_id,
* array size should be in sync with the declaration in the wil6210.h
*/
const struct fw_map fw_mapping[] = {
{0x000000, 0x040000, 0x8c0000, "fw_code"}, /* FW code RAM 256k */
{0x800000, 0x808000, 0x900000, "fw_data"}, /* FW data RAM 32k */
{0x840000, 0x860000, 0x908000, "fw_peri"}, /* periph. data RAM 128k */
{0x880000, 0x88a000, 0x880000, "rgf"}, /* various RGF 40k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl"}, /* AGC table 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext"}, /* Pcie_ext_rgf 4k */
{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext"}, /* mac_ext_rgf 512b */
{0x8c0000, 0x949000, 0x8c0000, "upper"}, /* upper area 548k */
/*
* 920000..930000 ucode code RAM
* 930000..932000 ucode data RAM
* 932000..949000 back-door debug data
*/
/* FW code RAM 256k */
{0x000000, 0x040000, 0x8c0000, "fw_code", true},
/* FW data RAM 32k */
{0x800000, 0x808000, 0x900000, "fw_data", true},
/* periph data 128k */
{0x840000, 0x860000, 0x908000, "fw_peri", true},
/* various RGF 40k */
{0x880000, 0x88a000, 0x880000, "rgf", true},
/* AGC table 4k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
/* Pcie_ext_rgf 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
/* mac_ext_rgf 512b */
{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true},
/* upper area 548k */
{0x8c0000, 0x949000, 0x8c0000, "upper", true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 128k */
{0x000000, 0x020000, 0x920000, "uc_code", false},
/* ucode data RAM 16k */
{0x800000, 0x804000, 0x940000, "uc_data", false},
};
struct blink_on_off_time led_blink_time[] = {
@ -108,7 +118,7 @@ struct blink_on_off_time led_blink_time[] = {
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
* return AHB address for given firmware/ucode internal (linker) address
* return AHB address for given firmware internal (linker) address
* @x - internal address
* If address has no valid AHB mapping, return 0
*/
@ -117,7 +127,8 @@ static u32 wmi_addr_remap(u32 x)
uint i;
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
if (fw_mapping[i].fw &&
((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)))
return x + fw_mapping[i].host - fw_mapping[i].from;
}
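
Note: wmi_addr_remap() above is a straight table walk: an address inside a firmware window is shifted by that window's (host - from) delta, and windows now flagged as ucode-only (fw == false) are skipped. The following standalone sketch mirrors that arithmetic with a reduced, hypothetical table; all names here are illustrative and it is not the driver's code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced stand-in for struct fw_map: linker window [from, to) maps to AHB
 * address 'host'; 'fw' mirrors the new flag that excludes ucode-only areas.
 */
struct map_entry {
    uint32_t from, to, host;
    bool fw;
};

static const struct map_entry demo_map[] = {
    {0x000000, 0x040000, 0x8c0000, true},  /* like "fw_code" */
    {0x800000, 0x808000, 0x900000, true},  /* like "fw_data" */
    {0x000000, 0x020000, 0x920000, false}, /* like "uc_code" - never remapped */
};

/* Same rule as wmi_addr_remap(): first matching fw window wins, else 0. */
static uint32_t demo_addr_remap(uint32_t x)
{
    for (size_t i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++) {
        const struct map_entry *m = &demo_map[i];

        if (m->fw && x >= m->from && x < m->to)
            return x + m->host - m->from;
    }
    return 0; /* no valid AHB mapping */
}

int main(void)
{
    printf("0x001234 -> 0x%06x\n", demo_addr_remap(0x001234)); /* 0x8c1234 */
    printf("0x900000 -> 0x%06x\n", demo_addr_remap(0x900000)); /* 0, unmapped */
    return 0;
}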
@ -427,18 +438,24 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
int status = le32_to_cpu(data->status);
struct cfg80211_scan_info info = {
.aborted = (data->status != WMI_SCAN_SUCCESS),
.aborted = ((status != WMI_SCAN_SUCCESS) &&
(status != WMI_SCAN_ABORT_REJECTED)),
};
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status);
wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
wil->scan_request, info.aborted);
del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, &info);
wil->radio_wdev = wil->wdev;
wil->scan_request = NULL;
wake_up_interruptible(&wil->wq);
if (wil->p2p.pending_listen_wdev) {
wil_dbg_misc(wil, "Scheduling delayed listen\n");
schedule_work(&wil->p2p.delayed_listen_work);
}
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
}
@ -548,7 +565,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (rc) {
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
wil_err(wil,
"%s: cfg80211_connect_result with failure\n",
@ -588,7 +604,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
wil->sta[evt->cid].status = wil_sta_connected;
set_bit(wil_status_fwconnected, wil->status);
netif_tx_wake_all_queues(ndev);
wil_update_net_queues_bh(wil, NULL, false);
out:
if (rc)
@ -1564,6 +1580,112 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
return rc;
}
int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile)
{
int rc;
struct wmi_ps_dev_profile_cfg_cmd cmd = {
.ps_profile = ps_profile,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_ps_dev_profile_cfg_event evt;
} __packed reply;
u32 status;
wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile);
reply.evt.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR);
rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, &cmd, sizeof(cmd),
WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply),
100);
if (rc)
return rc;
status = le32_to_cpu(reply.evt.status);
if (status != WMI_PS_CFG_CMD_STATUS_SUCCESS) {
wil_err(wil, "ps dev profile cfg failed with status %d\n",
status);
rc = -EINVAL;
}
return rc;
}
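
Note the pattern shared by these new WMI helpers: the reply's status field is pre-set to an error value before wmi_call(), so a reply that is never filled in cannot be mistaken for success. A minimal userspace analogue of that defensive pattern follows; the names are hypothetical and this is only a sketch of the idea, not the driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_STATUS_SUCCESS 0u
#define DEMO_STATUS_ERROR   1u

struct demo_reply {
    uint32_t status;
};

/* Stand-in for a firmware call that may or may not fill in the reply. */
static bool demo_fw_call(struct demo_reply *reply, bool fw_answers)
{
    if (fw_answers)
        reply->status = DEMO_STATUS_SUCCESS;
    /* on a missing reply the buffer is left untouched */
    return true; /* transport itself succeeded */
}

static int demo_cfg(bool fw_answers)
{
    struct demo_reply reply;

    /* Pre-set to an error so a missing reply is not mistaken for success. */
    reply.status = DEMO_STATUS_ERROR;

    if (!demo_fw_call(&reply, fw_answers))
        return -1;

    return reply.status == DEMO_STATUS_SUCCESS ? 0 : -1;
}

int main(void)
{
    printf("fw answered:  %d\n", demo_cfg(true));  /* 0  */
    printf("fw silent:    %d\n", demo_cfg(false)); /* -1 */
    return 0;
}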
int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short)
{
int rc;
struct wmi_set_mgmt_retry_limit_cmd cmd = {
.mgmt_retry_limit = retry_short,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_mgmt_retry_limit_event evt;
} __packed reply;
wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short);
if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
return -ENOTSUPP;
reply.evt.status = WMI_FW_STATUS_FAILURE;
rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, &cmd, sizeof(cmd),
WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
100);
if (rc)
return rc;
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "set mgmt retry limit failed with status %d\n",
reply.evt.status);
rc = -EINVAL;
}
return rc;
}
int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short)
{
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_get_mgmt_retry_limit_event evt;
} __packed reply;
wil_dbg_wmi(wil, "getting mgmt retry short\n");
if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
return -ENOTSUPP;
reply.evt.mgmt_retry_limit = 0;
rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, NULL, 0,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
100);
if (rc)
return rc;
if (retry_short)
*retry_short = reply.evt.mgmt_retry_limit;
return 0;
}
int wmi_abort_scan(struct wil6210_priv *wil)
{
int rc;
wil_dbg_wmi(wil, "sending WMI_ABORT_SCAN_CMDID\n");
rc = wmi_send(wil, WMI_ABORT_SCAN_CMDID, NULL, 0);
if (rc)
wil_err(wil, "Failed to abort scan (%d)\n", rc);
return rc;
}
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;

View File

@ -35,6 +35,7 @@
#define WMI_MAC_LEN (6)
#define WMI_PROX_RANGE_NUM (3)
#define WMI_MAX_LOSS_DMG_BEACONS (20)
#define MAX_NUM_OF_SECTORS (128)
/* Mailbox interface
* used for commands and events
@ -51,8 +52,10 @@ enum wmi_mid {
* the host
*/
enum wmi_fw_capability {
WMI_FW_CAPABILITY_FTM = 0,
WMI_FW_CAPABILITY_PS_CONFIG = 1,
WMI_FW_CAPABILITY_FTM = 0,
WMI_FW_CAPABILITY_PS_CONFIG = 1,
WMI_FW_CAPABILITY_RF_SECTORS = 2,
WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
WMI_FW_CAPABILITY_MAX,
};
@ -66,137 +69,149 @@ struct wmi_cmd_hdr {
/* List of Commands */
enum wmi_command_id {
WMI_CONNECT_CMDID = 0x01,
WMI_DISCONNECT_CMDID = 0x03,
WMI_DISCONNECT_STA_CMDID = 0x04,
WMI_START_SCAN_CMDID = 0x07,
WMI_SET_BSS_FILTER_CMDID = 0x09,
WMI_SET_PROBED_SSID_CMDID = 0x0A,
WMI_SET_LISTEN_INT_CMDID = 0x0B,
WMI_BCON_CTRL_CMDID = 0x0F,
WMI_ADD_CIPHER_KEY_CMDID = 0x16,
WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
WMI_PCP_CONF_CMDID = 0x18,
WMI_SET_APPIE_CMDID = 0x3F,
WMI_SET_WSC_STATUS_CMDID = 0x41,
WMI_PXMT_RANGE_CFG_CMDID = 0x42,
WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
WMI_MEM_READ_CMDID = 0x800,
WMI_MEM_WR_CMDID = 0x801,
WMI_ECHO_CMDID = 0x803,
WMI_DEEP_ECHO_CMDID = 0x804,
WMI_CONFIG_MAC_CMDID = 0x805,
WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
WMI_PHY_GET_STATISTICS_CMDID = 0x809,
WMI_FS_TUNE_CMDID = 0x80A,
WMI_CORR_MEASURE_CMDID = 0x80B,
WMI_READ_RSSI_CMDID = 0x80C,
WMI_TEMP_SENSE_CMDID = 0x80E,
WMI_DC_CALIB_CMDID = 0x80F,
WMI_SEND_TONE_CMDID = 0x810,
WMI_IQ_TX_CALIB_CMDID = 0x811,
WMI_IQ_RX_CALIB_CMDID = 0x812,
WMI_SET_UCODE_IDLE_CMDID = 0x813,
WMI_SET_WORK_MODE_CMDID = 0x815,
WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
WMI_MARLON_R_READ_CMDID = 0x818,
WMI_MARLON_R_WRITE_CMDID = 0x819,
WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A,
MAC_IO_STATIC_PARAMS_CMDID = 0x81B,
MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C,
WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
WMI_RF_RX_TEST_CMDID = 0x81E,
WMI_CFG_RX_CHAIN_CMDID = 0x820,
WMI_VRING_CFG_CMDID = 0x821,
WMI_BCAST_VRING_CFG_CMDID = 0x822,
WMI_VRING_BA_EN_CMDID = 0x823,
WMI_VRING_BA_DIS_CMDID = 0x824,
WMI_RCP_ADDBA_RESP_CMDID = 0x825,
WMI_RCP_DELBA_CMDID = 0x826,
WMI_SET_SSID_CMDID = 0x827,
WMI_GET_SSID_CMDID = 0x828,
WMI_SET_PCP_CHANNEL_CMDID = 0x829,
WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
WMI_SW_TX_REQ_CMDID = 0x82B,
WMI_READ_MAC_RXQ_CMDID = 0x830,
WMI_READ_MAC_TXQ_CMDID = 0x831,
WMI_WRITE_MAC_RXQ_CMDID = 0x832,
WMI_WRITE_MAC_TXQ_CMDID = 0x833,
WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834,
WMI_MLME_PUSH_CMDID = 0x835,
WMI_BEAMFORMING_MGMT_CMDID = 0x836,
WMI_BF_TXSS_MGMT_CMDID = 0x837,
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
WMI_RS_MGMT_CMDID = 0x852,
WMI_RF_MGMT_CMDID = 0x853,
WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
WMI_OTP_READ_CMDID = 0x856,
WMI_OTP_WRITE_CMDID = 0x857,
WMI_LED_CFG_CMDID = 0x858,
WMI_CONNECT_CMDID = 0x01,
WMI_DISCONNECT_CMDID = 0x03,
WMI_DISCONNECT_STA_CMDID = 0x04,
WMI_START_SCAN_CMDID = 0x07,
WMI_SET_BSS_FILTER_CMDID = 0x09,
WMI_SET_PROBED_SSID_CMDID = 0x0A,
WMI_SET_LISTEN_INT_CMDID = 0x0B,
WMI_BCON_CTRL_CMDID = 0x0F,
WMI_ADD_CIPHER_KEY_CMDID = 0x16,
WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
WMI_PCP_CONF_CMDID = 0x18,
WMI_SET_APPIE_CMDID = 0x3F,
WMI_SET_WSC_STATUS_CMDID = 0x41,
WMI_PXMT_RANGE_CFG_CMDID = 0x42,
WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
WMI_MEM_READ_CMDID = 0x800,
WMI_MEM_WR_CMDID = 0x801,
WMI_ECHO_CMDID = 0x803,
WMI_DEEP_ECHO_CMDID = 0x804,
WMI_CONFIG_MAC_CMDID = 0x805,
WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
WMI_PHY_GET_STATISTICS_CMDID = 0x809,
WMI_FS_TUNE_CMDID = 0x80A,
WMI_CORR_MEASURE_CMDID = 0x80B,
WMI_READ_RSSI_CMDID = 0x80C,
WMI_TEMP_SENSE_CMDID = 0x80E,
WMI_DC_CALIB_CMDID = 0x80F,
WMI_SEND_TONE_CMDID = 0x810,
WMI_IQ_TX_CALIB_CMDID = 0x811,
WMI_IQ_RX_CALIB_CMDID = 0x812,
WMI_SET_UCODE_IDLE_CMDID = 0x813,
WMI_SET_WORK_MODE_CMDID = 0x815,
WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
WMI_MARLON_R_READ_CMDID = 0x818,
WMI_MARLON_R_WRITE_CMDID = 0x819,
WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A,
MAC_IO_STATIC_PARAMS_CMDID = 0x81B,
MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C,
WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
WMI_RF_RX_TEST_CMDID = 0x81E,
WMI_CFG_RX_CHAIN_CMDID = 0x820,
WMI_VRING_CFG_CMDID = 0x821,
WMI_BCAST_VRING_CFG_CMDID = 0x822,
WMI_VRING_BA_EN_CMDID = 0x823,
WMI_VRING_BA_DIS_CMDID = 0x824,
WMI_RCP_ADDBA_RESP_CMDID = 0x825,
WMI_RCP_DELBA_CMDID = 0x826,
WMI_SET_SSID_CMDID = 0x827,
WMI_GET_SSID_CMDID = 0x828,
WMI_SET_PCP_CHANNEL_CMDID = 0x829,
WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
WMI_SW_TX_REQ_CMDID = 0x82B,
WMI_READ_MAC_RXQ_CMDID = 0x830,
WMI_READ_MAC_TXQ_CMDID = 0x831,
WMI_WRITE_MAC_RXQ_CMDID = 0x832,
WMI_WRITE_MAC_TXQ_CMDID = 0x833,
WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834,
WMI_MLME_PUSH_CMDID = 0x835,
WMI_BEAMFORMING_MGMT_CMDID = 0x836,
WMI_BF_TXSS_MGMT_CMDID = 0x837,
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
WMI_RS_MGMT_CMDID = 0x852,
WMI_RF_MGMT_CMDID = 0x853,
WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
WMI_OTP_READ_CMDID = 0x856,
WMI_OTP_WRITE_CMDID = 0x857,
WMI_LED_CFG_CMDID = 0x858,
/* Performance monitoring commands */
WMI_BF_CTRL_CMDID = 0x862,
WMI_NOTIFY_REQ_CMDID = 0x863,
WMI_GET_STATUS_CMDID = 0x864,
WMI_GET_RF_STATUS_CMDID = 0x866,
WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
WMI_UNIT_TEST_CMDID = 0x900,
WMI_HICCUP_CMDID = 0x901,
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
WMI_BF_CTRL_CMDID = 0x862,
WMI_NOTIFY_REQ_CMDID = 0x863,
WMI_GET_STATUS_CMDID = 0x864,
WMI_GET_RF_STATUS_CMDID = 0x866,
WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
WMI_UNIT_TEST_CMDID = 0x900,
WMI_HICCUP_CMDID = 0x901,
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
WMI_PORT_ALLOCATE_CMDID = 0x911,
WMI_PORT_DELETE_CMDID = 0x912,
WMI_POWER_MGMT_CFG_CMDID = 0x913,
WMI_START_LISTEN_CMDID = 0x914,
WMI_START_SEARCH_CMDID = 0x915,
WMI_DISCOVERY_START_CMDID = 0x916,
WMI_DISCOVERY_STOP_CMDID = 0x917,
WMI_PCP_START_CMDID = 0x918,
WMI_PCP_STOP_CMDID = 0x919,
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
WMI_P2P_CFG_CMDID = 0x910,
WMI_PORT_ALLOCATE_CMDID = 0x911,
WMI_PORT_DELETE_CMDID = 0x912,
WMI_POWER_MGMT_CFG_CMDID = 0x913,
WMI_START_LISTEN_CMDID = 0x914,
WMI_START_SEARCH_CMDID = 0x915,
WMI_DISCOVERY_START_CMDID = 0x916,
WMI_DISCOVERY_STOP_CMDID = 0x917,
WMI_PCP_START_CMDID = 0x918,
WMI_PCP_STOP_CMDID = 0x919,
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
/* Power Save Configuration Commands */
WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
/* Not supported yet */
WMI_PS_DEV_CFG_CMDID = 0x91D,
WMI_PS_DEV_CFG_CMDID = 0x91D,
/* Not supported yet */
WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
/* Per MAC Power Save Configuration commands
* Not supported yet
*/
WMI_PS_MID_CFG_CMDID = 0x91F,
WMI_PS_MID_CFG_CMDID = 0x91F,
/* Not supported yet */
WMI_PS_MID_CFG_READ_CMDID = 0x920,
WMI_RS_CFG_CMDID = 0x921,
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
WMI_TOF_SESSION_START_CMDID = 0x991,
WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
WMI_TOF_SET_LCR_CMDID = 0x993,
WMI_TOF_SET_LCI_CMDID = 0x994,
WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
WMI_GET_PMK_CMDID = 0xF048,
WMI_SET_PASSPHRASE_CMDID = 0xF049,
WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
WMI_FW_VER_CMDID = 0xF04E,
WMI_PMC_CMDID = 0xF04F,
WMI_PS_MID_CFG_READ_CMDID = 0x920,
WMI_RS_CFG_CMDID = 0x921,
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930,
WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
WMI_TOF_SESSION_START_CMDID = 0x991,
WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
WMI_TOF_SET_LCR_CMDID = 0x993,
WMI_TOF_SET_LCI_CMDID = 0x994,
WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
WMI_TOF_SET_TX_RX_OFFSET_CMDID = 0x997,
WMI_TOF_GET_TX_RX_OFFSET_CMDID = 0x998,
WMI_GET_RF_SECTOR_PARAMS_CMDID = 0x9A0,
WMI_SET_RF_SECTOR_PARAMS_CMDID = 0x9A1,
WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A2,
WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A3,
WMI_SET_RF_SECTOR_ON_CMDID = 0x9A4,
WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5,
WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
WMI_GET_PMK_CMDID = 0xF048,
WMI_SET_PASSPHRASE_CMDID = 0xF049,
WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
WMI_FW_VER_CMDID = 0xF04E,
WMI_PMC_CMDID = 0xF04F,
};
/* WMI_CONNECT_CMDID */
@ -879,6 +894,14 @@ struct wmi_aoa_meas_cmd {
__le32 meas_rf_mask;
} __packed;
/* WMI_SET_MGMT_RETRY_LIMIT_CMDID */
struct wmi_set_mgmt_retry_limit_cmd {
/* MAC retransmit limit for mgmt frames */
u8 mgmt_retry_limit;
/* alignment to 32b */
u8 reserved[3];
} __packed;
enum wmi_tof_burst_duration {
WMI_TOF_BURST_DURATION_250_USEC = 2,
WMI_TOF_BURST_DURATION_500_USEC = 3,
@ -942,6 +965,15 @@ struct wmi_tof_channel_info_cmd {
__le32 channel_info_report_request;
} __packed;
/* WMI_TOF_SET_TX_RX_OFFSET_CMDID */
struct wmi_tof_set_tx_rx_offset_cmd {
/* TX delay offset */
__le32 tx_offset;
/* RX delay offset */
__le32 rx_offset;
__le32 reserved[2];
} __packed;
/* WMI Events
* List of Events (target to host)
*/
@ -1035,12 +1067,24 @@ enum wmi_event_id {
WMI_RS_CFG_DONE_EVENTID = 0x1921,
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
WMI_TOF_SET_LCR_EVENTID = 0x1993,
WMI_TOF_SET_LCI_EVENTID = 0x1994,
WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996,
WMI_TOF_SET_TX_RX_OFFSET_EVENTID = 0x1997,
WMI_TOF_GET_TX_RX_OFFSET_EVENTID = 0x1998,
WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A0,
WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A1,
WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A2,
WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A3,
WMI_SET_RF_SECTOR_ON_DONE_EVENTID = 0x19A4,
WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5,
WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@ -1166,6 +1210,7 @@ enum baseband_type {
BASEBAND_SPARROW_M_B0 = 0x05,
BASEBAND_SPARROW_M_C0 = 0x06,
BASEBAND_SPARROW_M_D0 = 0x07,
BASEBAND_TALYN_M_A0 = 0x08,
};
/* WMI_GET_BASEBAND_TYPE_EVENTID */
@ -2070,6 +2115,22 @@ struct wmi_aoa_meas_event {
u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
} __packed;
/* WMI_SET_MGMT_RETRY_LIMIT_EVENTID */
struct wmi_set_mgmt_retry_limit_event {
/* enum wmi_fw_status */
u8 status;
/* alignment to 32b */
u8 reserved[3];
} __packed;
/* WMI_GET_MGMT_RETRY_LIMIT_EVENTID */
struct wmi_get_mgmt_retry_limit_event {
/* MAC retransmit limit for mgmt frames */
u8 mgmt_retry_limit;
/* alignment to 32b */
u8 reserved[3];
} __packed;
/* WMI_TOF_GET_CAPABILITIES_EVENTID */
struct wmi_tof_get_capabilities_event {
u8 ftm_capability;
@ -2184,4 +2245,283 @@ struct wmi_tof_channel_info_event {
u8 report[0];
} __packed;
/* WMI_TOF_SET_TX_RX_OFFSET_EVENTID */
struct wmi_tof_set_tx_rx_offset_event {
/* enum wmi_fw_status */
u8 status;
u8 reserved[3];
} __packed;
/* WMI_TOF_GET_TX_RX_OFFSET_EVENTID */
struct wmi_tof_get_tx_rx_offset_event {
/* enum wmi_fw_status */
u8 status;
u8 reserved1[3];
/* TX delay offset */
__le32 tx_offset;
/* RX delay offset */
__le32 rx_offset;
__le32 reserved2[2];
} __packed;
/* Result status codes for WMI commands */
enum wmi_rf_sector_status {
WMI_RF_SECTOR_STATUS_SUCCESS = 0x00,
WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR = 0x01,
WMI_RF_SECTOR_STATUS_BUSY_ERROR = 0x02,
WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR = 0x03,
};
/* Types of the RF sector (TX,RX) */
enum wmi_rf_sector_type {
WMI_RF_SECTOR_TYPE_RX = 0x00,
WMI_RF_SECTOR_TYPE_TX = 0x01,
};
/* Content of RF Sector (six 32-bits registers) */
struct wmi_rf_sector_info {
/* Phase values for RF Chains[15-0] (2bits per RF chain) */
__le32 psh_hi;
/* Phase values for RF Chains[31-16] (2bits per RF chain) */
__le32 psh_lo;
/* ETYPE Bit0 for all RF chains[31-0] - bit0 of Edge amplifier gain
* index
*/
__le32 etype0;
/* ETYPE Bit1 for all RF chains[31-0] - bit1 of Edge amplifier gain
* index
*/
__le32 etype1;
/* ETYPE Bit2 for all RF chains[31-0] - bit2 of Edge amplifier gain
* index
*/
__le32 etype2;
/* D-Type values (3bits each) for 8 Distribution amplifiers + X16
* switch bits
*/
__le32 dtype_swch_off;
} __packed;
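
As described in the field comments above, each phase register packs sixteen 2-bit phase values (psh_hi covers RF chains 15..0, psh_lo covers chains 31..16). A small, hypothetical helper showing how such a 2-bit field could be pulled out of the packed words; the LSB-first ordering of chains within each word is an assumption made for illustration only, this is not driver code.

#include <stdint.h>
#include <stdio.h>

/* Extract the 2-bit phase value of one RF chain from the packed registers.
 * Chains 0..15 live in psh_hi, chains 16..31 in psh_lo (per the comments);
 * bit placement within the word is assumed LSB-first for this demo.
 */
static unsigned int demo_rf_chain_phase(uint32_t psh_hi, uint32_t psh_lo,
                                        unsigned int chain)
{
    uint32_t word = (chain < 16) ? psh_hi : psh_lo;
    unsigned int shift = (chain % 16) * 2;

    return (word >> shift) & 0x3;
}

int main(void)
{
    uint32_t psh_hi = 0x000000C4; /* chain0=0b00, chain1=0b01, chain3=0b11 */
    uint32_t psh_lo = 0x00000002; /* chain16=0b10 */

    printf("chain 1  phase: %u\n", demo_rf_chain_phase(psh_hi, psh_lo, 1));
    printf("chain 3  phase: %u\n", demo_rf_chain_phase(psh_hi, psh_lo, 3));
    printf("chain 16 phase: %u\n", demo_rf_chain_phase(psh_hi, psh_lo, 16));
    return 0;
}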
#define WMI_INVALID_RF_SECTOR_INDEX (0xFFFF)
#define WMI_MAX_RF_MODULES_NUM (8)
/* WMI_GET_RF_SECTOR_PARAMS_CMD */
struct wmi_get_rf_sector_params_cmd {
/* Sector number to be retrieved */
__le16 sector_idx;
/* enum wmi_rf_sector_type - type of requested RF sector */
u8 sector_type;
/* bitmask vector specifying destination RF modules */
u8 rf_modules_vec;
} __packed;
/* \WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT */
struct wmi_get_rf_sector_params_done_event {
/* result status of WMI_GET_RF_SECTOR_PARAMS_CMD (enum
* wmi_rf_sector_status)
*/
u8 status;
/* align next field to U64 boundary */
u8 reserved[7];
/* TSF timestamp when RF sectors were retrieved */
__le64 tsf;
/* Content of RF sector retrieved from each RF module */
struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM];
} __packed;
/* WMI_SET_RF_SECTOR_PARAMS_CMD */
struct wmi_set_rf_sector_params_cmd {
/* Sector number to be set */
__le16 sector_idx;
/* enum wmi_rf_sector_type - type of requested RF sector */
u8 sector_type;
/* bitmask vector specifying destination RF modules */
u8 rf_modules_vec;
/* Content of RF sector to be written to each RF module */
struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM];
} __packed;
/* \WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT */
struct wmi_set_rf_sector_params_done_event {
/* result status of WMI_SET_RF_SECTOR_PARAMS_CMD (enum
* wmi_rf_sector_status)
*/
u8 status;
} __packed;
/* WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD - Get RF sector index selected by
* TXSS/BRP for communication with specified CID
*/
struct wmi_get_selected_rf_sector_index_cmd {
/* Connection/Station ID in [0:7] range */
u8 cid;
/* type of requested RF sector (enum wmi_rf_sector_type) */
u8 sector_type;
/* align to U32 boundary */
u8 reserved[2];
} __packed;
/* \WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Returns retrieved RF sector
* index selected by TXSS/BRP for communication with specified CID
*/
struct wmi_get_selected_rf_sector_index_done_event {
/* Retrieved sector index selected in TXSS (for TX sector request) or
* BRP (for RX sector request)
*/
__le16 sector_idx;
/* result status of WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD (enum
* wmi_rf_sector_status)
*/
u8 status;
/* align next field to U64 boundary */
u8 reserved[5];
/* TSF timestamp when result was retrieved */
__le64 tsf;
} __packed;
/* WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD - Force RF sector index for
* communication with specified CID. Assumes that TXSS/BRP is disabled by
* another command
*/
struct wmi_set_selected_rf_sector_index_cmd {
/* Connection/Station ID in [0:7] range */
u8 cid;
/* type of requested RF sector (enum wmi_rf_sector_type) */
u8 sector_type;
/* Forced sector index */
__le16 sector_idx;
} __packed;
/* \WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Success/Fail status for
* WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD
*/
struct wmi_set_selected_rf_sector_index_done_event {
/* result status of WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD (enum
* wmi_rf_sector_status)
*/
u8 status;
/* align to U32 boundary */
u8 reserved[3];
} __packed;
/* WMI_SET_RF_SECTOR_ON_CMD - Activates specified sector for specified rf
* modules
*/
struct wmi_set_rf_sector_on_cmd {
/* Sector index to be activated */
__le16 sector_idx;
/* type of requested RF sector (enum wmi_rf_sector_type) */
u8 sector_type;
/* bitmask vector specifying destination RF modules */
u8 rf_modules_vec;
} __packed;
/* \WMI_SET_RF_SECTOR_ON_DONE_EVENT - Success/Fail status for
* WMI_SET_RF_SECTOR_ON_CMD
*/
struct wmi_set_rf_sector_on_done_event {
/* result status of WMI_SET_RF_SECTOR_ON_CMD (enum
* wmi_rf_sector_status)
*/
u8 status;
/* align to U32 boundary */
u8 reserved[3];
} __packed;
enum wmi_sector_sweep_type {
WMI_SECTOR_SWEEP_TYPE_TXSS = 0x00,
WMI_SECTOR_SWEEP_TYPE_BCON = 0x01,
WMI_SECTOR_SWEEP_TYPE_TXSS_AND_BCON = 0x02,
WMI_SECTOR_SWEEP_TYPE_NUM = 0x03,
};
/* WMI_PRIO_TX_SECTORS_ORDER_CMDID
*
* Set the order of TX sectors in TXSS and/or Beacon(AP).
*
* Returned event:
* - WMI_PRIO_TX_SECTORS_ORDER_EVENTID
*/
struct wmi_prio_tx_sectors_order_cmd {
/* tx sectors order to be applied, 0xFF for end of array */
u8 tx_sectors_priority_array[MAX_NUM_OF_SECTORS];
/* enum wmi_sector_sweep_type, TXSS and/or Beacon */
u8 sector_sweep_type;
/* needed only for TXSS configuration */
u8 cid;
/* alignment to 32b */
u8 reserved[2];
} __packed;
/* completion status codes */
enum wmi_prio_tx_sectors_cmd_status {
WMI_PRIO_TX_SECT_CMD_STATUS_SUCCESS = 0x00,
WMI_PRIO_TX_SECT_CMD_STATUS_BAD_PARAM = 0x01,
/* other error */
WMI_PRIO_TX_SECT_CMD_STATUS_ERROR = 0x02,
};
/* WMI_PRIO_TX_SECTORS_ORDER_EVENTID */
struct wmi_prio_tx_sectors_order_event {
/* enum wmi_prio_tx_sectors_cmd_status */
u8 status;
/* alignment to 32b */
u8 reserved[3];
} __packed;
struct wmi_prio_tx_sectors_num_cmd {
/* [0-128], 0 = No changes */
u8 beacon_number_of_sectors;
/* [0-128], 0 = No changes */
u8 txss_number_of_sectors;
/* [0-8] needed only for TXSS configuration */
u8 cid;
} __packed;
/* WMI_PRIO_TX_SECTORS_NUMBER_CMDID
*
* Set the number of active sectors in TXSS and/or Beacon.
*
* Returned event:
* - WMI_PRIO_TX_SECTORS_NUMBER_EVENTID
*/
struct wmi_prio_tx_sectors_number_cmd {
struct wmi_prio_tx_sectors_num_cmd active_sectors_num;
/* alignment to 32b */
u8 reserved;
} __packed;
/* WMI_PRIO_TX_SECTORS_NUMBER_EVENTID */
struct wmi_prio_tx_sectors_number_event {
/* enum wmi_prio_tx_sectors_cmd_status */
u8 status;
/* alignment to 32b */
u8 reserved[3];
} __packed;
/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID
*
* Set default sectors order and number (hard coded in board file)
* in TXSS and/or Beacon.
*
* Returned event:
* - WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID
*/
struct wmi_prio_tx_sectors_set_default_cfg_cmd {
/* enum wmi_sector_sweep_type, TXSS and/or Beacon */
u8 sector_sweep_type;
/* needed only for TXSS configuration */
u8 cid;
/* alignment to 32b */
u8 reserved[2];
} __packed;
/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID */
struct wmi_prio_tx_sectors_set_default_cfg_event {
/* enum wmi_prio_tx_sectors_cmd_status */
u8 status;
/* alignment to 32b */
u8 reserved[3];
} __packed;
#endif /* __WILOCITY_WMI_H__ */

View File

@ -35,7 +35,8 @@ brcmfmac-objs += \
firmware.o \
feature.o \
btcoex.o \
vendor.o
vendor.o \
pno.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
bcdc.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \

View File

@ -22,10 +22,12 @@
/* IDs of the 6 default common rings of msgbuf protocol */
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT 0
#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT 1
#define BRCMF_H2D_MSGRING_FLOWRING_IDSTART 2
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE 2
#define BRCMF_D2H_MSGRING_TX_COMPLETE 3
#define BRCMF_D2H_MSGRING_RX_COMPLETE 4
#define BRCMF_NROF_H2D_COMMON_MSGRINGS 2
#define BRCMF_NROF_D2H_COMMON_MSGRINGS 3
#define BRCMF_NROF_COMMON_MSGRINGS (BRCMF_NROF_H2D_COMMON_MSGRINGS + \
@ -95,14 +97,18 @@ struct brcmf_bus_ops {
* @flowrings: commonrings which are dynamically created and destroyed for data.
* @rx_dataoffset: if set then all rx data has this offset.
* @max_rxbufpost: maximum number of buffers to post for rx.
* @nrof_flowrings: number of flowrings.
* @max_flowrings: maximum number of tx flow rings supported.
* @max_submissionrings: maximum number of submission rings(h2d) supported.
* @max_completionrings: maximum number of completion rings(d2h) supported.
*/
struct brcmf_bus_msgbuf {
struct brcmf_commonring *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
struct brcmf_commonring **flowrings;
u32 rx_dataoffset;
u32 max_rxbufpost;
u32 nrof_flowrings;
u16 max_flowrings;
u16 max_submissionrings;
u16 max_completionrings;
};
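
The rework above replaces the single nrof_flowrings counter with separate max_flowrings/max_submissionrings/max_completionrings limits, and ring ids on the wire are offset by BRCMF_H2D_MSGRING_FLOWRING_IDSTART because the two common H2D rings come first. A tiny sketch of that id translation, mirroring the later changes in msgbuf.c; the demo_* names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define DEMO_H2D_MSGRING_FLOWRING_IDSTART 2 /* two common H2D rings first */

/* Driver-internal flowring index -> ring id used in messages to the dongle. */
static uint16_t demo_flowid_to_ringid(uint16_t flowid)
{
    return flowid + DEMO_H2D_MSGRING_FLOWRING_IDSTART;
}

/* Ring id reported by the dongle -> driver-internal flowring index. */
static uint16_t demo_ringid_to_flowid(uint16_t ringid)
{
    return ringid - DEMO_H2D_MSGRING_FLOWRING_IDSTART;
}

int main(void)
{
    uint16_t flowid = 5;
    uint16_t ringid = demo_flowid_to_ringid(flowid);

    printf("flowring %u is ring id %u on the wire\n", flowid, ringid);
    printf("ring id %u maps back to flowring %u\n", ringid,
           demo_ringid_to_flowid(ringid));
    return 0;
}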

View File

@ -32,6 +32,7 @@
#include "fwil_types.h"
#include "p2p.h"
#include "btcoex.h"
#include "pno.h"
#include "cfg80211.h"
#include "feature.h"
#include "fwil.h"
@ -41,16 +42,6 @@
#include "common.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define BRCMF_PNO_VERSION 2
#define BRCMF_PNO_TIME 30
#define BRCMF_PNO_REPEAT 4
#define BRCMF_PNO_FREQ_EXPO_MAX 3
#define BRCMF_PNO_MAX_PFN_COUNT 16
#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
#define BRCMF_PNO_HIDDEN_BIT 2
#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
#define BRCMF_PNO_SCAN_COMPLETE 1
#define BRCMF_PNO_SCAN_INCOMPLETE 0
#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
#define WPA_OUI_TYPE 1
@ -768,12 +759,12 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
brcmf_scan_config_mpc(ifp, 1);
/*
* e-scan can be initiated by scheduled scan
* e-scan can be initiated internally
* which takes precedence.
*/
if (cfg->sched_escan) {
if (cfg->internal_escan) {
brcmf_dbg(SCAN, "scheduled scan completed\n");
cfg->sched_escan = false;
cfg->internal_escan = false;
if (!aborted)
cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
} else if (scan_request) {
@ -1091,9 +1082,9 @@ exit:
}
static s32
brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
struct brcmf_if *ifp, struct cfg80211_scan_request *request)
brcmf_do_escan(struct brcmf_if *ifp, struct cfg80211_scan_request *request)
{
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 err;
u32 passive_scan;
struct brcmf_scan_results *results;
@ -1101,7 +1092,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
brcmf_dbg(SCAN, "Enter\n");
escan->ifp = ifp;
escan->wiphy = wiphy;
escan->wiphy = cfg->wiphy;
escan->escan_state = WL_ESCAN_STATE_SCANNING;
passive_scan = cfg->active_scan ? 0 : 1;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
@ -1181,7 +1172,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
if (err)
goto scan_out;
err = brcmf_do_escan(cfg, wiphy, vif->ifp, request);
err = brcmf_do_escan(vif->ifp, request);
if (err)
goto scan_out;
} else {
@ -3024,7 +3015,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
struct escan_info *escan = &cfg->escan_info;
set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
if (cfg->scan_request) {
if (cfg->internal_escan || cfg->scan_request) {
escan->escan_state = WL_ESCAN_STATE_IDLE;
brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
}
@ -3047,7 +3038,7 @@ static void brcmf_escan_timeout(unsigned long data)
struct brcmf_cfg80211_info *cfg =
(struct brcmf_cfg80211_info *)data;
if (cfg->scan_request) {
if (cfg->internal_escan || cfg->scan_request) {
brcmf_err("timer expired\n");
schedule_work(&cfg->escan_timeout_work);
}
@ -3130,7 +3121,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
goto exit;
if (!cfg->scan_request) {
if (!cfg->internal_escan && !cfg->scan_request) {
brcmf_dbg(SCAN, "result without cfg80211 request\n");
goto exit;
}
@ -3176,7 +3167,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
goto exit;
if (cfg->scan_request) {
if (cfg->internal_escan || cfg->scan_request) {
brcmf_inform_bss(cfg);
aborted = status != BRCMF_E_STATUS_SUCCESS;
brcmf_notify_escan_complete(cfg, ifp, aborted, false);
@ -3201,6 +3192,95 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
brcmf_cfg80211_escan_timeout_worker);
}
static struct cfg80211_scan_request *
brcmf_alloc_internal_escan_request(struct wiphy *wiphy, u32 n_netinfo) {
struct cfg80211_scan_request *req;
size_t req_size;
req_size = sizeof(*req) +
n_netinfo * sizeof(req->channels[0]) +
n_netinfo * sizeof(*req->ssids);
req = kzalloc(req_size, GFP_KERNEL);
if (req) {
req->wiphy = wiphy;
req->ssids = (void *)(&req->channels[0]) +
n_netinfo * sizeof(req->channels[0]);
}
return req;
}
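
brcmf_alloc_internal_escan_request() above carves the channel pointer array and the SSID array out of a single kzalloc() together with the request header; req->ssids simply points just past the n_netinfo channel slots. A standalone illustration of the same layout trick with simplified, hypothetical structures (not the cfg80211 ones):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_ssid {
    char ssid[32];
    size_t ssid_len;
};

/* Mimics the shape of cfg80211_scan_request: counters, an ssids pointer
 * and a trailing flexible array of channel pointers.
 */
struct demo_request {
    int n_ssids;
    int n_channels;
    struct demo_ssid *ssids;
    void *channels[]; /* flexible array member */
};

/* One allocation holds: header | n channel slots | n ssid entries. */
static struct demo_request *demo_alloc_request(int n)
{
    size_t sz = sizeof(struct demo_request) +
                n * sizeof(void *) +          /* channel pointer slots */
                n * sizeof(struct demo_ssid); /* ssid entries          */
    struct demo_request *req = calloc(1, sz);

    if (req)
        /* ssids live directly after the channel pointer slots */
        req->ssids = (struct demo_ssid *)&req->channels[n];
    return req;
}

int main(void)
{
    struct demo_request *req = demo_alloc_request(2);

    if (!req)
        return 1;
    memcpy(req->ssids[req->n_ssids].ssid, "example", 7);
    req->ssids[req->n_ssids++].ssid_len = 7;
    printf("ssids start %zu bytes after the request header\n",
           (size_t)((char *)req->ssids - (char *)req));
    free(req);
    return 0;
}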
static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
u8 *ssid, u8 ssid_len, u8 channel)
{
struct ieee80211_channel *chan;
enum nl80211_band band;
int freq;
if (channel <= CH_MAX_2G_CHANNEL)
band = NL80211_BAND_2GHZ;
else
band = NL80211_BAND_5GHZ;
freq = ieee80211_channel_to_frequency(channel, band);
if (!freq)
return -EINVAL;
chan = ieee80211_get_channel(req->wiphy, freq);
if (!chan)
return -EINVAL;
req->channels[req->n_channels++] = chan;
memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
req->ssids[req->n_ssids++].ssid_len = ssid_len;
return 0;
}
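
brcmf_internal_escan_add_info() above picks the band purely from the channel number (anything above CH_MAX_2G_CHANNEL is treated as 5 GHz) and then relies on ieee80211_channel_to_frequency() and ieee80211_get_channel() to validate it. A simplified, hypothetical userspace version of that channel-to-frequency mapping; the formulas are the usual 802.11 ones and are shown here only for illustration, not as the mac80211 implementation.

#include <stdio.h>

#define DEMO_CH_MAX_2G_CHANNEL 14 /* last 2.4 GHz channel */

/* Simplified channel -> centre frequency (MHz); returns 0 if unknown.
 * 2.4 GHz channels 1-13 sit 5 MHz apart from 2407 MHz, channel 14 is the
 * special 2484 MHz case, 5 GHz channels sit at 5000 MHz + 5 MHz * channel.
 */
static int demo_channel_to_freq(int channel)
{
    if (channel <= 0)
        return 0;
    if (channel <= DEMO_CH_MAX_2G_CHANNEL)
        return (channel == 14) ? 2484 : 2407 + channel * 5;
    return 5000 + channel * 5;
}

int main(void)
{
    printf("channel 6   -> %d MHz\n", demo_channel_to_freq(6));   /* 2437 */
    printf("channel 36  -> %d MHz\n", demo_channel_to_freq(36));  /* 5180 */
    printf("channel 149 -> %d MHz\n", demo_channel_to_freq(149)); /* 5745 */
    return 0;
}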
static int brcmf_start_internal_escan(struct brcmf_if *ifp,
struct cfg80211_scan_request *request)
{
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
int err;
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
/* Abort any on-going scan */
brcmf_abort_scanning(cfg);
}
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
cfg->escan_info.run = brcmf_run_escan;
err = brcmf_do_escan(ifp, request);
if (err) {
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
return err;
}
cfg->internal_escan = true;
return 0;
}
static struct brcmf_pno_net_info_le *
brcmf_get_netinfo_array(struct brcmf_pno_scanresults_le *pfn_v1)
{
struct brcmf_pno_scanresults_v2_le *pfn_v2;
struct brcmf_pno_net_info_le *netinfo;
switch (pfn_v1->version) {
default:
WARN_ON(1);
/* fall-thru */
case cpu_to_le32(1):
netinfo = (struct brcmf_pno_net_info_le *)(pfn_v1 + 1);
break;
case cpu_to_le32(2):
pfn_v2 = (struct brcmf_pno_scanresults_v2_le *)pfn_v1;
netinfo = (struct brcmf_pno_net_info_le *)(pfn_v2 + 1);
break;
}
return netinfo;
}
/* PFN result doesn't have all the info which is required by the supplicant
* (e.g. IEs). Do a target escan so that sched scan results are reported
* via wl_inform_single_bss in the required format. Escan does require the
@ -3214,12 +3294,8 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
struct cfg80211_scan_request *request = NULL;
struct cfg80211_ssid *ssid = NULL;
struct ieee80211_channel *channel = NULL;
struct wiphy *wiphy = cfg_to_wiphy(cfg);
int err = 0;
int channel_req = 0;
int band = 0;
int i, err = 0;
struct brcmf_pno_scanresults_le *pfn_result;
u32 result_count;
u32 status;
@ -3245,254 +3321,86 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
*/
WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count);
if (result_count > 0) {
int i;
request = kzalloc(sizeof(*request), GFP_KERNEL);
ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
if (!request || !ssid || !channel) {
err = -ENOMEM;
goto out_err;
}
request->wiphy = wiphy;
data += sizeof(struct brcmf_pno_scanresults_le);
netinfo_start = (struct brcmf_pno_net_info_le *)data;
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
if (!netinfo) {
brcmf_err("Invalid netinfo ptr. index: %d\n",
i);
err = -EINVAL;
goto out_err;
}
brcmf_dbg(SCAN, "SSID:%s Channel:%d\n",
netinfo->SSID, netinfo->channel);
memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
ssid[i].ssid_len = netinfo->SSID_len;
request->n_ssids++;
channel_req = netinfo->channel;
if (channel_req <= CH_MAX_2G_CHANNEL)
band = NL80211_BAND_2GHZ;
else
band = NL80211_BAND_5GHZ;
channel[i].center_freq =
ieee80211_channel_to_frequency(channel_req,
band);
channel[i].band = band;
channel[i].flags |= IEEE80211_CHAN_NO_HT40;
request->channels[i] = &channel[i];
request->n_channels++;
}
/* assign parsed ssid array */
if (request->n_ssids)
request->ssids = &ssid[0];
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
/* Abort any on-going scan */
brcmf_abort_scanning(cfg);
}
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
cfg->escan_info.run = brcmf_run_escan;
err = brcmf_do_escan(cfg, wiphy, ifp, request);
if (err) {
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
goto out_err;
}
cfg->sched_escan = true;
cfg->scan_request = request;
} else {
if (!result_count) {
brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
goto out_err;
}
request = brcmf_alloc_internal_escan_request(wiphy,
result_count);
if (!request) {
err = -ENOMEM;
goto out_err;
}
kfree(ssid);
kfree(channel);
kfree(request);
return 0;
data += sizeof(struct brcmf_pno_scanresults_le);
netinfo_start = brcmf_get_netinfo_array(pfn_result);
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
if (!netinfo) {
brcmf_err("Invalid netinfo ptr. index: %d\n",
i);
err = -EINVAL;
goto out_err;
}
brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
netinfo->SSID, netinfo->channel);
err = brcmf_internal_escan_add_info(request,
netinfo->SSID,
netinfo->SSID_len,
netinfo->channel);
if (err)
goto out_err;
}
err = brcmf_start_internal_escan(ifp, request);
if (!err)
goto free_req;
out_err:
kfree(ssid);
kfree(channel);
kfree(request);
cfg80211_sched_scan_stopped(wiphy);
return err;
}
static int brcmf_dev_pno_clean(struct net_device *ndev)
{
int ret;
/* Disable pfn */
ret = brcmf_fil_iovar_int_set(netdev_priv(ndev), "pfn", 0);
if (ret == 0) {
/* clear pfn */
ret = brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfnclear",
NULL, 0);
}
if (ret < 0)
brcmf_err("failed code %d\n", ret);
return ret;
}
static int brcmf_dev_pno_config(struct brcmf_if *ifp,
struct cfg80211_sched_scan_request *request)
{
struct brcmf_pno_param_le pfn_param;
struct brcmf_pno_macaddr_le pfn_mac;
s32 err;
u8 *mac_mask;
int i;
memset(&pfn_param, 0, sizeof(pfn_param));
pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
/* set extra pno params */
pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
pfn_param.repeat = BRCMF_PNO_REPEAT;
pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
/* set up pno scan fr */
pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
sizeof(pfn_param));
if (err) {
brcmf_err("pfn_set failed, err=%d\n", err);
return err;
}
/* Find out if mac randomization should be turned on */
if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR))
return 0;
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
memcpy(pfn_mac.mac, request->mac_addr, ETH_ALEN);
mac_mask = request->mac_addr_mask;
for (i = 0; i < ETH_ALEN; i++) {
pfn_mac.mac[i] &= mac_mask[i];
pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
}
/* Clear multi bit */
pfn_mac.mac[0] &= 0xFE;
/* Set locally administered */
pfn_mac.mac[0] |= 0x02;
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
brcmf_err("pfn_macaddr failed, err=%d\n", err);
free_req:
kfree(request);
return err;
}
static int
brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_sched_scan_request *request)
struct cfg80211_sched_scan_request *req)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_pno_net_param_le pfn;
int i;
int ret = 0;
brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
request->n_match_sets, request->n_ssids);
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
req->n_match_sets, req->n_ssids);
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
brcmf_err("Scanning suppressed: status (%lu)\n",
cfg->scan_status);
return -EAGAIN;
}
if (!request->n_ssids || !request->n_match_sets) {
brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
request->n_ssids);
if (req->n_match_sets <= 0) {
brcmf_dbg(SCAN, "invalid number of matchsets specified: %d\n",
req->n_match_sets);
return -EINVAL;
}
if (request->n_ssids > 0) {
for (i = 0; i < request->n_ssids; i++) {
/* Active scan req for ssids */
brcmf_dbg(SCAN, ">>> Active scan req for ssid (%s)\n",
request->ssids[i].ssid);
/* match_set ssids is a superset of the n_ssid list,
* so we need not add these sets separately.
*/
}
}
if (request->n_match_sets > 0) {
/* clean up everything */
ret = brcmf_dev_pno_clean(ndev);
if (ret < 0) {
brcmf_err("failed error=%d\n", ret);
return ret;
}
/* configure pno */
if (brcmf_dev_pno_config(ifp, request))
return -EINVAL;
/* configure each match set */
for (i = 0; i < request->n_match_sets; i++) {
struct cfg80211_ssid *ssid;
u32 ssid_len;
ssid = &request->match_sets[i].ssid;
ssid_len = ssid->ssid_len;
if (!ssid_len) {
brcmf_err("skip broadcast ssid\n");
continue;
}
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
pfn.wsec = cpu_to_le32(0);
pfn.infra = cpu_to_le32(1);
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
sizeof(pfn));
brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
ret == 0 ? "set" : "failed", ssid->ssid);
}
/* Enable the PNO */
if (brcmf_fil_iovar_int_set(ifp, "pfn", 1) < 0) {
brcmf_err("PNO enable failed!! ret=%d\n", ret);
return -EINVAL;
}
} else {
return -EINVAL;
}
return 0;
return brcmf_pno_start_sched_scan(ifp, req);
}
static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
struct net_device *ndev)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
brcmf_dbg(SCAN, "enter\n");
brcmf_dev_pno_clean(ndev);
if (cfg->sched_escan)
brcmf_notify_escan_complete(cfg, netdev_priv(ndev), true, true);
brcmf_pno_clean(ifp);
if (cfg->internal_escan)
brcmf_notify_escan_complete(cfg, ifp, true, true);
return 0;
}
@ -6428,6 +6336,7 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}

View File

@ -271,7 +271,7 @@ struct brcmf_cfg80211_wowl {
* @pub: common driver information.
* @channel: current channel.
* @active_scan: current scan mode.
* @sched_escan: e-scan for scheduled scan support running.
* @internal_escan: indicates internally initiated e-scan is running.
* @ibss_starter: indicates this sta is ibss starter.
* @pwr_save: indicates whether the dongle supports power save mode.
* @dongle_up: indicates whether the dongle is up or not.
@ -303,7 +303,7 @@ struct brcmf_cfg80211_info {
struct brcmf_pub *pub;
u32 channel;
bool active_scan;
bool sched_escan;
bool internal_escan;
bool ibss_starter;
bool pwr_save;
bool dongle_up;

View File

@ -131,6 +131,7 @@
#define BRCMF_TXBF_MU_BFR_CAP BIT(1)
#define BRCMF_MAXPMKID 16 /* max # PMKID cache entries */
#define BRCMF_NUMCHANNELS 64
#define BRCMF_PFN_MACADDR_CFG_VER 1
#define BRCMF_PFN_MAC_OUI_ONLY BIT(0)
@ -718,6 +719,21 @@ struct brcmf_pno_param_le {
__le32 slow_freq;
};
/**
* struct brcmf_pno_config_le - PNO channel configuration.
*
* @reporttype: determines what is reported.
* @channel_num: number of channels specified in @channel_list.
* @channel_list: channels to use in PNO scan.
* @flags: reserved.
*/
struct brcmf_pno_config_le {
__le32 reporttype;
__le32 channel_num;
__le16 channel_list[BRCMF_NUMCHANNELS];
__le32 flags;
};
/**
* struct brcmf_pno_net_param_le - scan parameters per preferred network.
*
@ -769,6 +785,13 @@ struct brcmf_pno_scanresults_le {
__le32 count;
};
struct brcmf_pno_scanresults_v2_le {
__le32 version;
__le32 status;
__le32 count;
__le32 scan_ch_bucket;
};
/**
* struct brcmf_pno_macaddr_le - to configure PNO macaddr randomization.
*

View File

@ -87,11 +87,6 @@ struct msgbuf_common_hdr {
__le32 request_id;
};
struct msgbuf_buf_addr {
__le32 low_addr;
__le32 high_addr;
};
struct msgbuf_ioctl_req_hdr {
struct msgbuf_common_hdr msg;
__le32 cmd;
@ -227,7 +222,10 @@ struct brcmf_msgbuf {
struct brcmf_commonring **commonrings;
struct brcmf_commonring **flowrings;
dma_addr_t *flowring_dma_handle;
u16 nrof_flowrings;
u16 max_flowrings;
u16 max_submissionrings;
u16 max_completionrings;
u16 rx_dataoffset;
u32 max_rxbufpost;
@ -610,7 +608,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
create->msg.request_id = 0;
create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
create->flow_ring_id = cpu_to_le16(flowid +
BRCMF_NROF_H2D_COMMON_MSGRINGS);
BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
memcpy(create->sa, work->sa, ETH_ALEN);
memcpy(create->da, work->da, ETH_ALEN);
address = (u64)msgbuf->flowring_dma_handle[flowid];
@ -760,7 +758,7 @@ static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
u32 flowid;
msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
clear_bit(flowid, msgbuf->flow_map);
brcmf_msgbuf_txflow(msgbuf, flowid);
}
@ -866,7 +864,7 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
tx_status = (struct msgbuf_tx_status *)buf;
idx = le32_to_cpu(tx_status->msg.request_id);
flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->tx_pktids, idx);
if (!skb)
@ -1174,7 +1172,7 @@ brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
status = le16_to_cpu(flowring_create_resp->compl_hdr.status);
if (status) {
@ -1202,7 +1200,7 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);
if (status) {
@ -1307,7 +1305,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
brcmf_msgbuf_process_rx(msgbuf, buf);
for_each_set_bit(flowid, msgbuf->txstatus_done_map,
msgbuf->nrof_flowrings) {
msgbuf->max_flowrings) {
clear_bit(flowid, msgbuf->txstatus_done_map);
commonring = msgbuf->flowrings[flowid];
qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
@ -1349,7 +1347,7 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
delete->msg.request_id = 0;
delete->flow_ring_id = cpu_to_le16(flowid +
BRCMF_NROF_H2D_COMMON_MSGRINGS);
BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
delete->reason = 0;
brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
@ -1427,10 +1425,10 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
if_msgbuf = drvr->bus_if->msgbuf;
if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
brcmf_err("driver not configured for this many flowrings %d\n",
if_msgbuf->nrof_flowrings);
if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
if_msgbuf->max_flowrings);
if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
}
msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
@ -1443,7 +1441,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
goto fail;
}
INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
count = count * sizeof(unsigned long);
msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
if (!msgbuf->flow_map)
@ -1479,8 +1477,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
msgbuf->commonrings =
(struct brcmf_commonring **)if_msgbuf->commonrings;
msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
msgbuf->max_flowrings = if_msgbuf->max_flowrings;
msgbuf->flowring_dma_handle = kzalloc(msgbuf->max_flowrings *
sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
if (!msgbuf->flowring_dma_handle)
goto fail;
@ -1501,7 +1499,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
goto fail;
msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
if_msgbuf->nrof_flowrings);
if_msgbuf->max_flowrings);
if (!msgbuf->flow)
goto fail;

View File

@ -31,6 +31,10 @@
#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 32
#define BRCMF_H2D_TXFLOWRING_ITEMSIZE 48
struct msgbuf_buf_addr {
__le32 low_addr;
__le32 high_addr;
};
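
struct msgbuf_buf_addr moves into this header so pcie.c can reuse it: a 64-bit host DMA address is handed to the dongle as two little-endian 32-bit halves, the same "address & 0xffffffff" / "address >> 32" split performed later in brcmf_pcie_init_ringbuffers(). A trivial standalone illustration with host-endian fields and hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Host-endian stand-in for msgbuf_buf_addr (the real struct uses __le32). */
struct demo_buf_addr {
    uint32_t low_addr;
    uint32_t high_addr;
};

static struct demo_buf_addr demo_split_addr(uint64_t address)
{
    struct demo_buf_addr a = {
        .low_addr = (uint32_t)(address & 0xffffffff),
        .high_addr = (uint32_t)(address >> 32),
    };
    return a;
}

int main(void)
{
    struct demo_buf_addr a = demo_split_addr(0x0000001234abcd00ULL);

    printf("low 0x%08x high 0x%08x\n", a.low_addr, a.high_addr);
    return 0;
}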
int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);

View File

@ -135,7 +135,7 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_PCIE_MB_INT_D2H3_DB1)
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION 6
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
@ -166,17 +166,6 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_RING_MEM_SZ 16
#define BRCMF_RING_STATE_SZ 8
#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
#define BRCMF_DEF_MAX_RXBUFPOST 255
#define BRCMF_CONSOLE_BUFADDR_OFFSET 8
@ -231,7 +220,9 @@ struct brcmf_pcie_shared_info {
struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
struct brcmf_pcie_ringbuf *flowrings;
u16 max_rxbufpost;
u32 nrof_flowrings;
u16 max_flowrings;
u16 max_submissionrings;
u16 max_completionrings;
u32 rx_dataoffset;
u32 htod_mb_data_addr;
u32 dtoh_mb_data_addr;
@ -241,6 +232,7 @@ struct brcmf_pcie_shared_info {
dma_addr_t scratch_dmahandle;
void *ringupd;
dma_addr_t ringupd_dmahandle;
u8 version;
};
struct brcmf_pcie_core_info {
@ -284,6 +276,36 @@ struct brcmf_pcie_ringbuf {
u8 id;
};
/**
* struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
*
* @ringmem: dongle memory pointer to ring memory location
* @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
* @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
* @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
* @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
* @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
* @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
* @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
* @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
* @max_flowrings: maximum number of tx flow rings supported.
* @max_submissionrings: maximum number of submission rings(h2d) supported.
* @max_completionrings: maximum number of completion rings(d2h) supported.
*/
struct brcmf_pcie_dhi_ringinfo {
__le32 ringmem;
__le32 h2d_w_idx_ptr;
__le32 h2d_r_idx_ptr;
__le32 d2h_w_idx_ptr;
__le32 d2h_r_idx_ptr;
struct msgbuf_buf_addr h2d_w_idx_hostaddr;
struct msgbuf_buf_addr h2d_r_idx_hostaddr;
struct msgbuf_buf_addr d2h_w_idx_hostaddr;
struct msgbuf_buf_addr d2h_r_idx_hostaddr;
__le16 max_flowrings;
__le16 max_submissionrings;
__le16 max_completionrings;
};
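
With shared version 6 the three maxima come straight from the new brcmf_pcie_dhi_ringinfo fields; older firmware only reports a single submission-queue count, from which the flowring count is derived by subtracting the two common H2D rings while the completion-ring count stays fixed at the three common D2H rings. The derivation used below in brcmf_pcie_init_ringbuffers(), restated as a small standalone sketch with hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>

#define DEMO_NROF_H2D_COMMON_MSGRINGS 2
#define DEMO_NROF_D2H_COMMON_MSGRINGS 3

struct demo_ring_limits {
    uint16_t max_flowrings;
    uint16_t max_submissionrings;
    uint16_t max_completionrings;
};

/* Derive the ring limits: rev6+ firmware reports all three values,
 * older firmware only the legacy submission-queue count.
 */
static struct demo_ring_limits
demo_derive_ring_limits(uint8_t shared_version, uint16_t legacy_max_subqueues,
                        uint16_t rev6_flow, uint16_t rev6_sub,
                        uint16_t rev6_compl)
{
    struct demo_ring_limits l;

    if (shared_version >= 6) {
        l.max_flowrings = rev6_flow;
        l.max_submissionrings = rev6_sub;
        l.max_completionrings = rev6_compl;
    } else {
        l.max_submissionrings = legacy_max_subqueues;
        l.max_flowrings = legacy_max_subqueues -
                          DEMO_NROF_H2D_COMMON_MSGRINGS;
        l.max_completionrings = DEMO_NROF_D2H_COMMON_MSGRINGS;
    }
    return l;
}

int main(void)
{
    /* e.g. a rev5 dongle advertising 42 submission queues */
    struct demo_ring_limits l = demo_derive_ring_limits(5, 42, 0, 0, 0);

    printf("flow %u, submission %u, completion %u\n",
           l.max_flowrings, l.max_submissionrings, l.max_completionrings);
    return 0;
}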
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
@ -1054,26 +1076,35 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
struct brcmf_pcie_ringbuf *ring;
struct brcmf_pcie_ringbuf *rings;
u32 ring_addr;
u32 d2h_w_idx_ptr;
u32 d2h_r_idx_ptr;
u32 h2d_w_idx_ptr;
u32 h2d_r_idx_ptr;
u32 addr;
u32 ring_mem_ptr;
u32 i;
u64 address;
u32 bufsz;
u16 max_sub_queues;
u8 idx_offset;
struct brcmf_pcie_dhi_ringinfo ringinfo;
u16 max_flowrings;
u16 max_submissionrings;
u16 max_completionrings;
ring_addr = devinfo->shared.ring_info_addr;
brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
sizeof(ringinfo));
if (devinfo->shared.version >= 6) {
max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
} else {
max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
max_flowrings = max_submissionrings -
BRCMF_NROF_H2D_COMMON_MSGRINGS;
max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
}
if (devinfo->dma_idx_sz != 0) {
bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
bufsz = (max_submissionrings + max_completionrings) *
devinfo->dma_idx_sz * 2;
devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
&devinfo->idxbuf_dmahandle,
@ -1083,14 +1114,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
}
if (devinfo->dma_idx_sz == 0) {
addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
idx_offset = sizeof(u32);
devinfo->write_ptr = brcmf_pcie_write_tcm16;
devinfo->read_ptr = brcmf_pcie_read_tcm16;
@ -1103,34 +1130,42 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
devinfo->read_ptr = brcmf_pcie_read_idx;
h2d_w_idx_ptr = 0;
addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
address = (u64)devinfo->idxbuf_dmahandle;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
ringinfo.h2d_w_idx_hostaddr.low_addr =
cpu_to_le32(address & 0xffffffff);
ringinfo.h2d_w_idx_hostaddr.high_addr =
cpu_to_le32(address >> 32);
h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
address += max_sub_queues * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
h2d_r_idx_ptr = h2d_w_idx_ptr +
max_submissionrings * idx_offset;
address += max_submissionrings * idx_offset;
ringinfo.h2d_r_idx_hostaddr.low_addr =
cpu_to_le32(address & 0xffffffff);
ringinfo.h2d_r_idx_hostaddr.high_addr =
cpu_to_le32(address >> 32);
d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
address += max_sub_queues * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
d2h_w_idx_ptr = h2d_r_idx_ptr +
max_submissionrings * idx_offset;
address += max_submissionrings * idx_offset;
ringinfo.d2h_w_idx_hostaddr.low_addr =
cpu_to_le32(address & 0xffffffff);
ringinfo.d2h_w_idx_hostaddr.high_addr =
cpu_to_le32(address >> 32);
d2h_r_idx_ptr = d2h_w_idx_ptr +
BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
max_completionrings * idx_offset;
address += max_completionrings * idx_offset;
ringinfo.d2h_r_idx_hostaddr.low_addr =
cpu_to_le32(address & 0xffffffff);
ringinfo.d2h_r_idx_hostaddr.high_addr =
cpu_to_le32(address >> 32);
memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
&ringinfo, sizeof(ringinfo));
brcmf_dbg(PCIE, "Using host memory indices\n");
}
addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
@ -1161,20 +1196,19 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
devinfo->shared.nrof_flowrings =
max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
GFP_KERNEL);
devinfo->shared.max_flowrings = max_flowrings;
devinfo->shared.max_submissionrings = max_submissionrings;
devinfo->shared.max_completionrings = max_completionrings;
rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
if (!rings)
goto fail;
brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
devinfo->shared.nrof_flowrings);
brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
for (i = 0; i < max_flowrings; i++) {
ring = &rings[i];
ring->devinfo = devinfo;
ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
brcmf_commonring_register_cb(&ring->commonring,
brcmf_pcie_ring_mb_ring_bell,
brcmf_pcie_ring_mb_update_rptr,
@ -1357,17 +1391,16 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
{
struct brcmf_pcie_shared_info *shared;
u32 addr;
u32 version;
shared = &devinfo->shared;
shared->tcm_base_address = sharedram_addr;
shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
(version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
brcmf_err("Unsupported PCIE version %d\n", version);
shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
(shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
brcmf_err("Unsupported PCIE version %d\n", shared->version);
return -EINVAL;
}
@ -1661,18 +1694,18 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
bus->msgbuf->commonrings[i] =
&devinfo->shared.commonrings[i]->commonring;
flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
GFP_KERNEL);
if (!flowrings)
goto fail;
for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
for (i = 0; i < devinfo->shared.max_flowrings; i++)
flowrings[i] = &devinfo->shared.flowrings[i].commonring;
bus->msgbuf->flowrings = flowrings;
bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;
init_waitqueue_head(&devinfo->mbdata_resp_wait);

View File

@ -0,0 +1,242 @@
/*
* Copyright (c) 2016 Broadcom
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/netdevice.h>
#include <net/cfg80211.h>
#include "core.h"
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "cfg80211.h"
#include "pno.h"
#define BRCMF_PNO_VERSION 2
#define BRCMF_PNO_REPEAT 4
#define BRCMF_PNO_FREQ_EXPO_MAX 3
#define BRCMF_PNO_IMMEDIATE_SCAN_BIT 3
#define BRCMF_PNO_ENABLE_BD_SCAN_BIT 5
#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
#define BRCMF_PNO_REPORT_SEPARATELY_BIT 11
#define BRCMF_PNO_SCAN_INCOMPLETE 0
#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
#define BRCMF_PNO_HIDDEN_BIT 2
#define BRCMF_PNO_SCHED_SCAN_PERIOD 30
static int brcmf_pno_channel_config(struct brcmf_if *ifp,
struct brcmf_pno_config_le *cfg)
{
cfg->reporttype = 0;
cfg->flags = 0;
return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
}
static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
u32 mscan, u32 bestn)
{
struct brcmf_pno_param_le pfn_param;
u16 flags;
u32 pfnmem;
s32 err;
memset(&pfn_param, 0, sizeof(pfn_param));
pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
/* set extra pno params */
flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
BIT(BRCMF_PNO_REPORT_SEPARATELY_BIT) |
BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
pfn_param.repeat = BRCMF_PNO_REPEAT;
pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
/* set up pno scan frequency */
if (scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
brcmf_dbg(SCAN, "scan period too small, using minimum\n");
scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
}
pfn_param.scan_freq = cpu_to_le32(scan_freq);
if (mscan) {
pfnmem = bestn;
/* set bestn in firmware */
err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
if (err < 0) {
brcmf_err("failed to set pfnmem\n");
goto exit;
}
/* get max mscan which the firmware supports */
err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
if (err < 0) {
brcmf_err("failed to get pfnmem\n");
goto exit;
}
mscan = min_t(u32, mscan, pfnmem);
pfn_param.mscan = mscan;
pfn_param.bestn = bestn;
flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
}
pfn_param.flags = cpu_to_le16(flags);
err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
sizeof(pfn_param));
if (err)
brcmf_err("pfn_set failed, err=%d\n", err);
exit:
return err;
}
static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
u8 *mac_mask)
{
struct brcmf_pno_macaddr_le pfn_mac;
int err, i;
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++) {
pfn_mac.mac[i] &= mac_mask[i];
pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
}
/* Clear multicast bit */
pfn_mac.mac[0] &= 0xFE;
/* Set locally administered */
pfn_mac.mac[0] |= 0x02;
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
brcmf_err("pfn_macaddr failed, err=%d\n", err);
return err;
}
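
The loop above keeps the octets selected by mac_mask from the template address and randomizes the rest, then forces the result to be a unicast, locally administered MAC. A minimal standalone sketch of the same masking logic, for illustration only (plain userspace C, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

/* Keep the bits covered by the mask from the template, randomize the
 * rest, then force unicast (bit 0 of octet 0 cleared) and locally
 * administered (bit 1 of octet 0 set).
 */
int main(void)
{
	unsigned char mac[6]  = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
	unsigned char mask[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = (mac[i] & mask[i]) | (rand() & ~mask[i]);
	mac[0] &= 0xFE;
	mac[0] |= 0x02;
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}

With the template and mask shown here the OUI part is preserved and only the last three octets vary between scans, which is the behaviour the driver asks the firmware for.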
static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
bool active)
{
struct brcmf_pno_net_param_le pfn;
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
pfn.wsec = cpu_to_le32(0);
pfn.infra = cpu_to_le32(1);
if (active)
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
return brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
}
static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
struct cfg80211_sched_scan_request *req)
{
int i;
if (!ssid || !req->ssids || !req->n_ssids)
return false;
for (i = 0; i < req->n_ssids; i++) {
if (ssid->ssid_len == req->ssids[i].ssid_len) {
if (!strncmp(ssid->ssid, req->ssids[i].ssid,
ssid->ssid_len))
return true;
}
}
return false;
}
int brcmf_pno_clean(struct brcmf_if *ifp)
{
int ret;
/* Disable pfn */
ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
if (ret == 0) {
/* clear pfn */
ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
}
if (ret < 0)
brcmf_err("failed code %d\n", ret);
return ret;
}
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
struct cfg80211_sched_scan_request *req)
{
struct brcmu_d11inf *d11inf;
struct brcmf_pno_config_le pno_cfg;
struct cfg80211_ssid *ssid;
u16 chan;
int i, ret;
/* clean up everything */
ret = brcmf_pno_clean(ifp);
if (ret < 0) {
brcmf_err("failed error=%d\n", ret);
return ret;
}
/* configure pno */
ret = brcmf_pno_config(ifp, req->scan_plans[0].interval, 0, 0);
if (ret < 0)
return ret;
/* configure random mac */
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
ret = brcmf_pno_set_random(ifp, req->mac_addr,
req->mac_addr_mask);
if (ret < 0)
return ret;
}
/* configure channels to use */
d11inf = &ifp->drvr->config->d11inf;
for (i = 0; i < req->n_channels; i++) {
chan = req->channels[i]->hw_value;
pno_cfg.channel_list[i] = cpu_to_le16(chan);
}
if (req->n_channels) {
pno_cfg.channel_num = cpu_to_le32(req->n_channels);
brcmf_pno_channel_config(ifp, &pno_cfg);
}
/* configure each match set */
for (i = 0; i < req->n_match_sets; i++) {
ssid = &req->match_sets[i].ssid;
if (!ssid->ssid_len) {
brcmf_err("skip broadcast ssid\n");
continue;
}
ret = brcmf_pno_add_ssid(ifp, ssid,
brcmf_is_ssid_active(ssid, req));
if (ret < 0)
brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
ret == 0 ? "set" : "failed", ssid->ssid);
}
/* Enable the PNO */
ret = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
if (ret < 0)
brcmf_err("PNO enable failed!! ret=%d\n", ret);
return ret;
}

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2016 Broadcom
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCMF_PNO_H
#define _BRCMF_PNO_H
#define BRCMF_PNO_SCAN_COMPLETE 1
#define BRCMF_PNO_MAX_PFN_COUNT 16
#define BRCMF_PNO_SCHED_SCAN_MIN_PERIOD 10
#define BRCMF_PNO_SCHED_SCAN_MAX_PERIOD 508
/**
* brcmf_pno_clean - disable and clear pno in firmware.
*
* @ifp: interface object used.
*/
int brcmf_pno_clean(struct brcmf_if *ifp);
/**
* brcmf_pno_start_sched_scan - initiate scheduled scan on device.
*
* @ifp: interface object used.
* @req: configuration parameters for scheduled scan.
*/
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
struct cfg80211_sched_scan_request *req);
#endif /* _BRCMF_PNO_H */
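
As a usage sketch (not part of the patch): the two helpers declared above are what a cfg80211 scheduled-scan start/stop path would call. The wrapper names below are made up for illustration, and the snippet assumes the usual brcmfmac headers (core.h, cfg80211.h, pno.h) for struct brcmf_if and struct cfg80211_sched_scan_request.

/* Start: programs channels, match sets and PNO parameters, then enables
 * pfn. The plan interval is clamped by the driver to
 * BRCMF_PNO_SCHED_SCAN_MIN_PERIOD if the request asks for less.
 */
static int example_sched_scan_start(struct brcmf_if *ifp,
				    struct cfg80211_sched_scan_request *req)
{
	return brcmf_pno_start_sched_scan(ifp, req);
}

/* Stop: disables pfn in firmware and clears the programmed networks. */
static int example_sched_scan_stop(struct brcmf_if *ifp)
{
	return brcmf_pno_clean(ifp);
}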

View File

@ -621,6 +621,7 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, 43340),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43341_CHIP_ID, 0xFFFFFFFF, 43340),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),

View File

@ -179,7 +179,7 @@ s16 qm_norm32(s32 op)
return u16extraSignBits;
}
/* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */
/* This table is log2(1+(i/32)) where i=[0:1:32], in q.15 format */
static const s16 log_table[] = {
0,
1455,
@ -212,7 +212,8 @@ static const s16 log_table[] = {
29717,
30498,
31267,
32024
32024,
32768
};
#define LOG_TABLE_SIZE 32 /* log_table size */
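
For reference, the q.15 entries above are round(log2(1 + i/32) * 2^15): i = 1 gives 1455, i = 31 gives 32024, and the added i = 32 entry is exactly 32768 since log2(2) = 1.0, which is what the corrected comment describes. A quick standalone check, illustration only (compile with -lm):

#include <math.h>
#include <stdio.h>

/* Prints the q.15 log2 table; the visible entries (1455, ..., 32024)
 * and the new 32768 terminator should be reproduced.
 */
int main(void)
{
	int i;

	for (i = 0; i <= 32; i++)
		printf("%2d: %.0f\n", i, round(log2(1.0 + i / 32.0) * 32768.0));
	return 0;
}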

View File

@ -36,6 +36,7 @@
#define BRCM_CC_4330_CHIP_ID 0x4330
#define BRCM_CC_4334_CHIP_ID 0x4334
#define BRCM_CC_43340_CHIP_ID 43340
#define BRCM_CC_43341_CHIP_ID 43341
#define BRCM_CC_43362_CHIP_ID 43362
#define BRCM_CC_4335_CHIP_ID 0x4335
#define BRCM_CC_4339_CHIP_ID 0x4339

View File

@ -2143,6 +2143,16 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
u16 enable = true;
/* set ibss coalescing_status */
ret = mwifiex_send_cmd(
priv,
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_SET, 0, &enable, true);
if (ret)
return ret;
/* "privacy" is set only for ad-hoc mode */
if (privacy) {
/*
@ -3980,13 +3990,11 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
struct mwifiex_ds_misc_cmd *hostcmd;
struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
struct mwifiex_adapter *adapter;
struct sk_buff *skb;
int err;
if (!priv)
return -EINVAL;
adapter = priv->adapter;
err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len,
mwifiex_tm_policy);

View File

@ -219,6 +219,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
#define ISSUPP_ADHOC_ENABLED(FwCapInfo) (FwCapInfo & BIT(25))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \

View File

@ -518,7 +518,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
{
int ret;
char fmt[64];
struct mwifiex_private *priv;
struct mwifiex_adapter *adapter = context;
struct mwifiex_fw_image fw;
bool init_failed = false;
@ -576,8 +575,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto err_init_fw;
}
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
if (!adapter->wiphy) {
if (mwifiex_register_cfg80211(adapter)) {
mwifiex_dbg(adapter, ERROR,

View File

@ -1424,8 +1424,13 @@ static inline void mwifiex_disable_wake(struct mwifiex_adapter *adapter)
{
if (adapter->irq_wakeup >= 0) {
disable_irq_wake(adapter->irq_wakeup);
if (!adapter->wake_by_wifi)
disable_irq(adapter->irq_wakeup);
disable_irq(adapter->irq_wakeup);
if (adapter->wake_by_wifi)
/* Undo our disable, since interrupt handler already
* did this.
*/
enable_irq(adapter->irq_wakeup);
}
}

View File

@ -135,6 +135,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
adapter->hs_enabling = false;
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@ -2050,7 +2051,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
}
/* Wait for the command done interrupt */
do {
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
&ireg_intr)) {
mwifiex_dbg(adapter, ERROR,
@ -2062,8 +2063,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = -1;
goto done;
}
} while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
CPU_INTR_DOOR_BELL);
if (!(ireg_intr & CPU_INTR_DOOR_BELL))
break;
usleep_range(10, 20);
}
if (ireg_intr & CPU_INTR_DOOR_BELL) {
mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
ret = -1;
goto done;
}
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);

View File

@ -827,7 +827,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u32 num_probes;
u32 ssid_len;
u32 chan_idx;
u32 chan_num;
u32 scan_type;
u16 scan_dur;
u8 channel;
@ -1105,13 +1104,12 @@ mwifiex_config_scan(struct mwifiex_private *priv,
mwifiex_dbg(adapter, INFO,
"info: Scan: Scanning current channel only\n");
}
chan_num = chan_idx;
} else {
mwifiex_dbg(adapter, INFO,
"info: Scan: Creating full region channel list\n");
chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
scan_chan_list,
*filtered_scan);
mwifiex_scan_create_channel_list(priv, user_scan_in,
scan_chan_list,
*filtered_scan);
}
}

View File

@ -186,9 +186,7 @@ static int mwifiex_sdio_resume(struct device *dev)
struct sdio_func *func = dev_to_sdio_func(dev);
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
pm_flag = sdio_get_host_pm_caps(func);
card = sdio_get_drvdata(func);
if (!card || !card->adapter) {
dev_err(dev, "resume: invalid card or adapter\n");
@ -298,6 +296,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
adapter->hs_enabling = false;
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@ -1136,7 +1135,6 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
{
u32 total_pkt_len, pkt_len;
struct sk_buff *skb_deaggr;
u32 pkt_type;
u16 blk_size;
u8 blk_num;
u8 *data;
@ -1157,8 +1155,6 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
break;
}
pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
2));
if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
mwifiex_dbg(adapter, ERROR,
"%s: error in pkt_len,\t"

View File

@ -1746,7 +1746,6 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
{
struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
struct mwifiex_ds_tdls_oper *oper = data_buf;
struct mwifiex_sta_node *sta_ptr;
struct host_cmd_tlv_rates *tlv_rates;
struct mwifiex_ie_types_htcap *ht_capab;
struct mwifiex_ie_types_qos_info *wmm_qos_info;
@ -1764,7 +1763,6 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
tdls_oper->reason = 0;
memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
@ -1902,6 +1900,24 @@ static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv,
return 0;
}
/* This function checks if the command is supported by the firmware */
static int mwifiex_is_cmd_supported(struct mwifiex_private *priv, u16 cmd_no)
{
if (!ISSUPP_ADHOC_ENABLED(priv->adapter->fw_cap_info)) {
switch (cmd_no) {
case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
case HostCmd_CMD_802_11_AD_HOC_START:
case HostCmd_CMD_802_11_AD_HOC_JOIN:
case HostCmd_CMD_802_11_AD_HOC_STOP:
return -EOPNOTSUPP;
default:
break;
}
}
return 0;
}
/*
* This function prepares the commands before sending them to the firmware.
*
@ -1915,6 +1931,13 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
struct host_cmd_ds_command *cmd_ptr = cmd_buf;
int ret = 0;
if (mwifiex_is_cmd_supported(priv, cmd_no)) {
mwifiex_dbg(priv->adapter, ERROR,
"0x%x command not supported by firmware\n",
cmd_no);
return -EOPNOTSUPP;
}
/* Prepare command */
switch (cmd_no) {
case HostCmd_CMD_GET_HW_SPEC:
@ -2208,7 +2231,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret;
u16 enable = true;
struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
struct mwifiex_ds_auto_ds auto_ds;
enum state_11d_t state_11d;
@ -2321,16 +2343,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
/* set ibss coalescing_status */
ret = mwifiex_send_cmd(
priv,
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_SET, 0, &enable, true);
if (ret)
return -1;
}
memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
amsdu_aggr_ctrl.enable = true;
/* Send request to firmware */

View File

@ -379,7 +379,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *epd;
int ret, i;
struct usb_card_rec *card;
u16 id_vendor, id_product, bcd_device, bcd_usb;
u16 id_vendor, id_product, bcd_device;
card = devm_kzalloc(&intf->dev, sizeof(*card), GFP_KERNEL);
if (!card)
@ -390,7 +390,6 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
id_vendor = le16_to_cpu(udev->descriptor.idVendor);
id_product = le16_to_cpu(udev->descriptor.idProduct);
bcd_device = le16_to_cpu(udev->descriptor.bcdDevice);
bcd_usb = le16_to_cpu(udev->descriptor.bcdUSB);
pr_debug("info: VID/PID = %X/%X, Boot2 version = %X\n",
id_vendor, id_product, bcd_device);

View File

@ -293,13 +293,13 @@ static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
ok = 0;
i = 200;
while (i--) {
if ((mt76_rr(dev, 0x0430) & 0x00ff0000) ||
(mt76_rr(dev, 0x0a30) & 0xffffffff) ||
(mt76_rr(dev, 0x0a34) & 0xffffffff))
ok++;
if (ok > 6)
break;
if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
!mt76_rr(dev, 0x0a30) &&
!mt76_rr(dev, 0x0a34)) {
if (ok++ > 5)
break;
continue;
}
msleep(1);
}

View File

@ -192,6 +192,9 @@
#define MT_BCN_OFFSET_BASE 0x041c
#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
#define MT_RXQ_STA 0x0430
#define MT_TXQ_STA 0x0434
#define MT_RF_CSR_CFG 0x0500
#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)

View File

@ -1337,10 +1337,11 @@ struct rtl8xxxu_fileops {
u32 ramask, int sgi);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void (*fill_txdesc) (struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
u16 rate_flag, bool sgi, bool short_preamble,
bool ampdu_enable);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
int writeN_block_size;
int rx_agg_buf_size;
char tx_desc_size;
@ -1434,14 +1435,16 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
int result[][8], int c1, int c2);
void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
u16 rate_flag, bool sgi, bool short_preamble,
bool ampdu_enable);
void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
u16 rate_flag, bool sgi, bool short_preamble,
bool ampdu_enable);
void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;

View File

@ -1556,7 +1556,7 @@ exit:
return ret;
}
void rtl8192eu_power_off(struct rtl8xxxu_priv *priv)
static void rtl8192eu_power_off(struct rtl8xxxu_priv *priv)
{
u8 val8;
u16 val16;

View File

@ -4372,6 +4372,13 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect)
{
#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
/*
* Barry Day reports this causes issues with 8192eu and 8723bu
* devices reconnecting. The reason for this is unclear, but
* until it is better understood, leave the code in place but
* disabled, so it is not lost.
*/
struct h2c_cmd h2c;
memset(&h2c, 0, sizeof(struct h2c_cmd));
@ -4383,6 +4390,7 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
h2c.media_status_rpt.parm &= ~BIT(0);
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
#endif
}
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
@ -4759,13 +4767,28 @@ static void rtl8xxxu_dump_action(struct device *dev,
* This format is used on 8188cu/8192cu/8723au
*/
void
rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
u16 rate_flag, bool sgi, bool short_preamble,
bool ampdu_enable)
rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
bool short_preamble, bool ampdu_enable, u32 rts_rate)
{
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u32 rate;
u16 rate_flags = tx_info->control.rates[0].flags;
u16 seq_number;
if (rate_flags & IEEE80211_TX_RC_MCS &&
!ieee80211_is_mgmt(hdr->frame_control))
rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
else
rate = tx_rate->hw_value;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
__func__, rate, cpu_to_le16(tx_desc->pkt_size));
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
tx_desc->txdw5 = cpu_to_le32(rate);
@ -4796,15 +4819,16 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
if (sgi)
tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
/*
* Use RTS rate 24M - does the mac80211 tell
* us which to use?
*/
tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
TXDESC32_RTS_RATE_SHIFT);
/*
* rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
*/
tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT);
if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
} else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_CTS_SELF_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
}
}
@ -4813,16 +4837,31 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
* This format is used on 8192eu/8723bu
*/
void
rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
u16 rate_flag, bool sgi, bool short_preamble,
bool ampdu_enable)
rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable, u32 rts_rate)
{
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
struct rtl8xxxu_txdesc40 *tx_desc40;
u32 rate;
u16 rate_flags = tx_info->control.rates[0].flags;
u16 seq_number;
tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc32;
if (rate_flags & IEEE80211_TX_RC_MCS &&
!ieee80211_is_mgmt(hdr->frame_control))
rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
else
rate = tx_rate->hw_value;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
__func__, rate, cpu_to_le16(tx_desc40->pkt_size));
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
tx_desc40->txdw4 = cpu_to_le32(rate);
@ -4849,15 +4888,19 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
if (short_preamble)
tx_desc40->txdw5 |= cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
/*
* Use RTS rate 24M - does the mac80211 tell
* us which to use?
*/
tx_desc40->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
TXDESC40_RTS_RATE_SHIFT);
tx_desc40->txdw4 |= cpu_to_le32(rts_rate << TXDESC40_RTS_RATE_SHIFT);
/*
* rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
*/
if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
} else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
/*
* For some reason the vendor driver doesn't set
* TXDESC40_HW_RTS_ENABLE for CTS to SELF
*/
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_CTS_SELF_ENABLE);
}
}
@ -4867,14 +4910,13 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct rtl8xxxu_txdesc32 *tx_desc;
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
struct device *dev = &priv->udev->dev;
u32 queue, rate;
u32 queue, rts_rate;
u16 pktlen = skb->len;
u16 seq_number;
u16 rate_flag = tx_info->control.rates[0].flags;
@ -4901,10 +4943,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
goto error;
}
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n",
__func__, tx_rate->bitrate, tx_rate->hw_value, pktlen);
if (ieee80211_is_action(hdr->frame_control))
rtl8xxxu_dump_action(dev, hdr);
@ -4958,12 +4996,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
}
}
if (rate_flag & IEEE80211_TX_RC_MCS &&
!ieee80211_is_mgmt(hdr->frame_control))
rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
else
rate = tx_rate->hw_value;
if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
(ieee80211_is_data_qos(hdr->frame_control) &&
sta && sta->ht_cap.cap &
@ -4974,10 +5006,17 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
(sta && vif && vif->bss_conf.use_short_preamble))
short_preamble = true;
if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS)
rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;
else if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT)
rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;
else
rts_rate = 0;
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
priv->fops->fill_txdesc(hdr, tx_desc, rate, rate_flag,
sgi, short_preamble, ampdu_enable);
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate);
rtl8xxxu_calc_tx_desc_csum(tx_desc);

View File

@ -1303,12 +1303,13 @@ EXPORT_SYMBOL_GPL(rtl_action_proc);
static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
{
struct ieee80211_hw *hw = rtlpriv->hw;
rtlpriv->ra.is_special_data = true;
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
rtlpriv, 1);
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
rtl_lps_leave(hw);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
@ -1381,8 +1382,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
if (is_tx) {
rtlpriv->ra.is_special_data = true;
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
rtl_lps_leave(hw);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

View File

@ -1150,10 +1150,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
} else {
mstatus = RT_MEDIA_DISCONNECT;
if (mac->link_state == MAC80211_LINKED) {
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
}
if (mac->link_state == MAC80211_LINKED)
rtl_lps_leave(hw);
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
@ -1431,8 +1429,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
}
if (mac->link_state == MAC80211_LINKED) {
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
rtl_lps_leave(hw);
mac->link_state = MAC80211_LINKED_SCANNING;
} else {
rtl_ips_nic_on(hw);

View File

@ -659,11 +659,9 @@ tx_status_ok:
}
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
}
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2))
rtl_lps_leave(hw);
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
@ -914,10 +912,8 @@ new_trx_end:
}
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
}
(rtlpriv->link_info.num_rx_inperiod > 2))
rtl_lps_leave(hw);
skb = new_skb;
no_new:
if (rtlpriv->use_new_trx_flow) {

View File

@ -407,8 +407,8 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
}
}
/*Enter the leisure power save mode.*/
void rtl_lps_enter(struct ieee80211_hw *hw)
/* Interrupt safe routine to enter the leisure power save mode.*/
static void rtl_lps_enter_core(struct ieee80211_hw *hw)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@ -444,10 +444,9 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
}
EXPORT_SYMBOL(rtl_lps_enter);
/*Leave the leisure power save mode.*/
void rtl_lps_leave(struct ieee80211_hw *hw)
/* Interrupt safe routine to leave the leisure power save mode.*/
static void rtl_lps_leave_core(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@ -477,7 +476,6 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
}
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
}
EXPORT_SYMBOL(rtl_lps_leave);
/* For sw LPS*/
void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
@ -670,12 +668,34 @@ void rtl_lps_change_work_callback(struct work_struct *work)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->enter_ps)
rtl_lps_enter(hw);
rtl_lps_enter_core(hw);
else
rtl_lps_leave(hw);
rtl_lps_leave_core(hw);
}
EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
void rtl_lps_enter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (!in_interrupt())
return rtl_lps_enter_core(hw);
rtlpriv->enter_ps = true;
schedule_work(&rtlpriv->works.lps_change_work);
}
EXPORT_SYMBOL_GPL(rtl_lps_enter);
void rtl_lps_leave(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (!in_interrupt())
return rtl_lps_leave_core(hw);
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
}
EXPORT_SYMBOL_GPL(rtl_lps_leave);
void rtl_swlps_wq_callback(void *data)
{
struct rtl_works *rtlworks = container_of_dwork_rtl(data,

View File

@ -275,6 +275,8 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw)
common->iface_down = false;
mutex_unlock(&common->mutex);
rsi_send_rx_filter_frame(common, 0);
return 0;
}
@ -388,7 +390,7 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
status = rsi_band_check(common);
if (!status)
status = rsi_set_channel(adapter->priv, channel);
status = rsi_set_channel(adapter->priv, curchan);
if (bss->assoc) {
if (common->hw_data_qs_blocked &&
@ -408,6 +410,34 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
return status;
}
/**
* rsi_config_power() - This function configures the tx power of the device.
* @hw: Pointer to the ieee80211_hw structure.
*
* Return: 0 on success, negative error code on failure.
*/
static int rsi_config_power(struct ieee80211_hw *hw)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
struct ieee80211_conf *conf = &hw->conf;
if (adapter->sc_nvifs <= 0) {
rsi_dbg(ERR_ZONE, "%s: No virtual interface found\n", __func__);
return -EINVAL;
}
rsi_dbg(INFO_ZONE,
"%s: Set tx power: %d dBM\n", __func__, conf->power_level);
if (conf->power_level == common->tx_power)
return 0;
common->tx_power = conf->power_level;
return rsi_send_radio_params_update(common);
}
/**
* rsi_mac80211_config() - This function is a handler for configuration
* requests. The stack calls this function to
@ -429,6 +459,12 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
status = rsi_channel_change(hw);
/* tx power */
if (changed & IEEE80211_CONF_CHANGE_POWER) {
rsi_dbg(INFO_ZONE, "%s: Configuring Power\n", __func__);
status = rsi_config_power(hw);
}
mutex_unlock(&common->mutex);
return status;
@ -471,11 +507,19 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
u16 rx_filter_word = 0;
mutex_lock(&common->mutex);
if (changed & BSS_CHANGED_ASSOC) {
rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
__func__, bss_conf->assoc);
if (bss_conf->assoc) {
/* Send the RX filter frame */
rx_filter_word = (ALLOW_DATA_ASSOC_PEER |
ALLOW_CTRL_ASSOC_PEER |
ALLOW_MGMT_ASSOC_PEER);
rsi_send_rx_filter_frame(common, rx_filter_word);
}
rsi_inform_bss_status(common,
bss_conf->assoc,
bss_conf->bssid,
@ -1013,6 +1057,7 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
struct rsi_common *common = adapter->priv;
mutex_lock(&common->mutex);
/* Resetting all the fields to default values */
common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
@ -1022,11 +1067,116 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
common->vif_info[0].seq_start = 0;
common->secinfo.ptk_cipher = 0;
common->secinfo.gtk_cipher = 0;
mutex_unlock(&common->mutex);
rsi_send_rx_filter_frame(common, 0);
mutex_unlock(&common->mutex);
return 0;
}
/**
* rsi_mac80211_set_antenna() - This function is used to configure
* tx and rx antennas.
* @hw: Pointer to the ieee80211_hw structure.
* @tx_ant: Bitmap for tx antenna
* @rx_ant: Bitmap for rx antenna
*
* Return: 0 on success, Negative error code on failure.
*/
static int rsi_mac80211_set_antenna(struct ieee80211_hw *hw,
u32 tx_ant, u32 rx_ant)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
u8 antenna = 0;
if (tx_ant > 1 || rx_ant > 1) {
rsi_dbg(ERR_ZONE,
"Invalid antenna selection (tx: %d, rx:%d)\n",
tx_ant, rx_ant);
rsi_dbg(ERR_ZONE,
"Use 0 for int_ant, 1 for ext_ant\n");
return -EINVAL;
}
rsi_dbg(INFO_ZONE, "%s: Antenna map Tx %x Rx %d\n",
__func__, tx_ant, rx_ant);
mutex_lock(&common->mutex);
antenna = tx_ant ? ANTENNA_SEL_UFL : ANTENNA_SEL_INT;
if (common->ant_in_use != antenna)
if (rsi_set_antenna(common, antenna))
goto fail_set_antenna;
rsi_dbg(INFO_ZONE, "(%s) Antenna path configured successfully\n",
tx_ant ? "UFL" : "INT");
common->ant_in_use = antenna;
mutex_unlock(&common->mutex);
return 0;
fail_set_antenna:
rsi_dbg(ERR_ZONE, "%s: Failed.\n", __func__);
mutex_unlock(&common->mutex);
return -EINVAL;
}
/**
* rsi_mac80211_get_antenna() - This function is used to get the current
* tx and rx antenna configuration.
*
* @hw: Pointer to the ieee80211_hw structure.
* @tx_ant: Bitmap for tx antenna
* @rx_ant: Bitmap for rx antenna
*
* Return: 0 on success, -1 on failure.
*/
static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw,
u32 *tx_ant, u32 *rx_ant)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
mutex_lock(&common->mutex);
*tx_ant = (common->ant_in_use == ANTENNA_SEL_UFL) ? 1 : 0;
*rx_ant = 0;
mutex_unlock(&common->mutex);
return 0;
}
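
Taken together, the two handlers above expose a one-bit antenna choice through the set_antenna/get_antenna ops added to mac80211_ops below: 0 selects the integrated antenna (ANTENNA_SEL_INT), 1 the U.FL connector (ANTENNA_SEL_UFL), and only the tx bitmap is used. A minimal sketch of the mapping, illustration only (assumes a valid rsi_common pointer):

u32 tx_ant = 1;		/* 1 = external (U.FL), 0 = integrated antenna */
u8 antenna = tx_ant ? ANTENNA_SEL_UFL : ANTENNA_SEL_INT;

/* rsi_set_antenna() returns 0 on success; cache the selection only then. */
if (common->ant_in_use != antenna && !rsi_set_antenna(common, antenna))
	common->ant_in_use = antenna;

From userspace this path is typically exercised through nl80211's set-antenna request (for example iw's "set antenna" subcommand), which ends up in rsi_mac80211_set_antenna().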
static void rsi_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rsi_hw * adapter = hw->priv;
int i;
sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
if (ch->flags & IEEE80211_CHAN_DISABLED)
continue;
if (ch->flags & IEEE80211_CHAN_RADAR)
ch->flags |= IEEE80211_CHAN_NO_IR;
}
rsi_dbg(INFO_ZONE,
"country = %s dfs_region = %d\n",
request->alpha2, request->dfs_region);
adapter->dfs_region = request->dfs_region;
}
static struct ieee80211_ops mac80211_ops = {
.tx = rsi_mac80211_tx,
.start = rsi_mac80211_start,
@ -1043,6 +1193,8 @@ static struct ieee80211_ops mac80211_ops = {
.ampdu_action = rsi_mac80211_ampdu_action,
.sta_add = rsi_mac80211_sta_add,
.sta_remove = rsi_mac80211_sta_remove,
.set_antenna = rsi_mac80211_set_antenna,
.get_antenna = rsi_mac80211_get_antenna,
};
/**
@ -1107,6 +1259,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->bands[NL80211_BAND_5GHZ] =
&adapter->sbands[NL80211_BAND_5GHZ];
wiphy->reg_notifier = rsi_reg_notify;
status = ieee80211_register_hw(hw);
if (status)
return status;

View File

@ -913,7 +913,8 @@ int rsi_band_check(struct rsi_common *common)
*
* Return: 0 on success, corresponding error code on failure.
*/
int rsi_set_channel(struct rsi_common *common, u16 channel)
int rsi_set_channel(struct rsi_common *common,
struct ieee80211_channel *channel)
{
struct sk_buff *skb = NULL;
struct rsi_mac_frame *mgmt_frame;
@ -928,24 +929,76 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
return -ENOMEM;
}
if (!channel) {
dev_kfree_skb(skb);
return 0;
}
memset(skb->data, 0, FRAME_DESC_SZ);
mgmt_frame = (struct rsi_mac_frame *)skb->data;
mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
mgmt_frame->desc_word[4] = cpu_to_le16(channel);
mgmt_frame->desc_word[4] = cpu_to_le16(channel->hw_value);
mgmt_frame->desc_word[4] |=
cpu_to_le16(((char)(channel->max_antenna_gain)) << 8);
mgmt_frame->desc_word[5] =
cpu_to_le16((char)(channel->max_antenna_gain));
mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET |
BBP_REG_WRITE |
(RSI_RF_TYPE << 4));
mgmt_frame->desc_word[5] = cpu_to_le16(0x01);
mgmt_frame->desc_word[6] = cpu_to_le16(0x12);
if (!(channel->flags & IEEE80211_CHAN_NO_IR) &&
!(channel->flags & IEEE80211_CHAN_RADAR)) {
if (common->tx_power < channel->max_power)
mgmt_frame->desc_word[6] = cpu_to_le16(common->tx_power);
else
mgmt_frame->desc_word[6] = cpu_to_le16(channel->max_power);
}
mgmt_frame->desc_word[7] = cpu_to_le16(common->priv->dfs_region);
if (common->channel_width == BW_40MHZ)
mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8);
common->channel = channel;
common->channel = channel->hw_value;
skb_put(skb, FRAME_DESC_SZ);
return rsi_send_internal_mgmt_frame(common, skb);
}
/**
* rsi_send_radio_params_update() - This function sends the radio
* parameters update to the device.
* @common: Pointer to the driver private structure.
*
* Return: 0 on success, corresponding error code on failure.
*/
int rsi_send_radio_params_update(struct rsi_common *common)
{
struct rsi_mac_frame *cmd_frame;
struct sk_buff *skb = NULL;
rsi_dbg(MGMT_TX_ZONE,
"%s: Sending Radio Params update frame\n", __func__);
skb = dev_alloc_skb(FRAME_DESC_SZ);
if (!skb) {
rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
__func__);
return -ENOMEM;
}
memset(skb->data, 0, FRAME_DESC_SZ);
cmd_frame = (struct rsi_mac_frame *)skb->data;
cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
cmd_frame->desc_word[1] = cpu_to_le16(RADIO_PARAMS_UPDATE);
cmd_frame->desc_word[3] = cpu_to_le16(BIT(0));
cmd_frame->desc_word[3] |= cpu_to_le16(common->tx_power << 8);
skb_put(skb, FRAME_DESC_SZ);
@ -1243,6 +1296,72 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
}
/**
* rsi_send_rx_filter_frame() - Sends a frame to filter the RX packets
*
* @common: Pointer to the driver private structure.
* @rx_filter_word: Flags specifying which packets to filter
*
* Return: 0 on success, -1 on failure.
*/
int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word)
{
struct rsi_mac_frame *cmd_frame;
struct sk_buff *skb;
rsi_dbg(MGMT_TX_ZONE, "Sending RX filter frame\n");
skb = dev_alloc_skb(FRAME_DESC_SZ);
if (!skb) {
rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
__func__);
return -ENOMEM;
}
memset(skb->data, 0, FRAME_DESC_SZ);
cmd_frame = (struct rsi_mac_frame *)skb->data;
cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
cmd_frame->desc_word[1] = cpu_to_le16(SET_RX_FILTER);
cmd_frame->desc_word[4] = cpu_to_le16(rx_filter_word);
skb_put(skb, FRAME_DESC_SZ);
return rsi_send_internal_mgmt_frame(common, skb);
}
/**
* rsi_set_antenna() - This function sends an antenna configuration request
* to the device.
*
* @common: Pointer to the driver private structure.
* @antenna: bitmap for tx antenna selection
*
* Return: 0 on Success, negative error code on failure
*/
int rsi_set_antenna(struct rsi_common *common, u8 antenna)
{
struct rsi_mac_frame *cmd_frame;
struct sk_buff *skb;
skb = dev_alloc_skb(FRAME_DESC_SZ);
if (!skb) {
rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
__func__);
return -ENOMEM;
}
memset(skb->data, 0, FRAME_DESC_SZ);
cmd_frame = (struct rsi_mac_frame *)skb->data;
cmd_frame->desc_word[1] = cpu_to_le16(ANT_SEL_FRAME);
cmd_frame->desc_word[3] = cpu_to_le16(antenna & 0x00ff);
cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
skb_put(skb, FRAME_DESC_SZ);
return rsi_send_internal_mgmt_frame(common, skb);
}
/**
* rsi_handle_ta_confirm_type() - This function handles the confirm frames.

View File

@ -204,6 +204,9 @@ struct rsi_common {
struct cqm_info cqm_info;
bool hw_data_qs_blocked;
int tx_power;
u8 ant_in_use;
};
struct rsi_hw {
@ -220,6 +223,7 @@ struct rsi_hw {
struct rsi_debugfs *dfsentry;
u8 num_debugfs_entries;
#endif
u8 dfs_region;
void *rsi_dev;
int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);

View File

@ -140,6 +140,19 @@
#define RSI_SUPP_FILTERS (FIF_ALLMULTI | FIF_PROBE_REQ |\
FIF_BCN_PRBRESP_PROMISC)
#define ANTENNA_SEL_INT 0x02 /* RF_OUT_2 / Integerated */
#define ANTENNA_SEL_UFL 0x03 /* RF_OUT_1 / U.FL */
/* Rx filter word definitions */
#define PROMISCOUS_MODE BIT(0)
#define ALLOW_DATA_ASSOC_PEER BIT(1)
#define ALLOW_MGMT_ASSOC_PEER BIT(2)
#define ALLOW_CTRL_ASSOC_PEER BIT(3)
#define DISALLOW_BEACONS BIT(4)
#define ALLOW_CONN_PEER_MGMT_WHILE_BUF_FULL BIT(5)
#define DISALLOW_BROADCAST_DATA BIT(6)
enum opmode {
STA_OPMODE = 1,
AP_OPMODE = 2
@ -190,7 +203,9 @@ enum cmd_frame_type {
BG_SCAN_PARAMS,
BG_SCAN_PROBE_REQ,
CW_MODE_REQ,
PER_CMD_PKT
PER_CMD_PKT,
ANT_SEL_FRAME = 0x20,
RADIO_PARAMS_UPDATE = 0x29
};
struct rsi_mac_frame {
@ -299,7 +314,8 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
u16 ssn, u8 buf_size, u8 event);
int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
u8 key_type, u8 key_id, u32 cipher);
int rsi_set_channel(struct rsi_common *common, u16 chno);
int rsi_set_channel(struct rsi_common *common,
struct ieee80211_channel *channel);
int rsi_send_block_unblock_frame(struct rsi_common *common, bool event);
void rsi_inform_bss_status(struct rsi_common *common, u8 status,
const u8 *bssid, u8 qos_enable, u16 aid);
@ -313,4 +329,7 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
int rsi_band_check(struct rsi_common *common);
int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word);
int rsi_send_radio_params_update(struct rsi_common *common);
int rsi_set_antenna(struct rsi_common *common, u8 antenna);
#endif