Merge wireless/main into wireless-next/main
There are a few merge conflicts due to overlapping fixes and changes; merge wireless/main to fix them.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
commit 0b354b8b09
@@ -813,7 +813,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 	rcu_read_lock();
 	do {
-		while (likely(!mvmtxq->stopped &&
+		while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
+					&mvmtxq->state) &&
+			      !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
+					&mvmtxq->state) &&
 			      !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
 			skb = ieee80211_tx_dequeue(hw, txq);

@@ -838,42 +841,25 @@ void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);

-	/*
-	 * Please note that racing is handled very carefully here:
-	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-	 * deleted afterwards.
-	 * This means that if:
-	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	queue is allocated and we can TX.
-	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	a race, should defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	need to allocate the queue and defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	queue is already scheduled for allocation, no need to allocate,
-	 *	should defer the frame.
-	 */
-
-	/* If the queue is allocated TX and return. */
-	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-		/*
-		 * Check that list is empty to avoid a race where txq_id is
-		 * already updated, but the queue allocation work wasn't
-		 * finished
-		 */
-		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-			return;
-
+	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+	    !txq->sta) {
 		iwl_mvm_mac_itxq_xmit(hw, txq);
 		return;
 	}

-	/* The list is being deleted only after the queue is fully allocated. */
-	if (!list_empty(&mvmtxq->list))
-		return;
+	/* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+	 * to handle any packets we leave on the txq now
+	 */

-	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-	schedule_work(&mvm->add_stream_wk);
+	spin_lock_bh(&mvm->add_stream_lock);
+	/* The list is being deleted only after the queue is fully allocated. */
+	if (list_empty(&mvmtxq->list) &&
+	    /* recheck under lock */
+	    !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+		list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+		schedule_work(&mvm->add_stream_wk);
+	}
+	spin_unlock_bh(&mvm->add_stream_lock);
 }

 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)	\
@@ -750,7 +750,10 @@ struct iwl_mvm_txq {
 	struct list_head list;
 	u16 txq_id;
 	atomic_t tx_request;
-	bool stopped;
+#define IWL_MVM_TXQ_STATE_STOP_FULL	0
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT	1
+#define IWL_MVM_TXQ_STATE_READY		2
+	unsigned long state;
 };

 static inline struct iwl_mvm_txq *

@@ -879,6 +882,7 @@ struct iwl_mvm {
 		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
 	};
 	struct work_struct add_stream_wk; /* To add streams to queues */
+	spinlock_t add_stream_lock;

 	const char *nvm_file_name;
 	struct iwl_nvm_data *nvm_data;
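Note on the conflict resolution above: the resolved code drops the driver's old bool stopped in favour of individual bits in one unsigned long state word, so the two stop reasons (queue full, redirect in progress) and the READY flag can be set and cleared independently and atomically. A self-contained user-space sketch of that pattern, using C11 atomics in place of the kernel's set_bit()/clear_bit()/test_bit() (all names here are illustrative, not from the driver):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { TXQ_STOP_FULL, TXQ_STOP_REDIRECT, TXQ_READY };

struct txq {
	atomic_ulong state;	/* one bit per independent condition */
};

static void txq_set(struct txq *q, int bit)
{
	atomic_fetch_or(&q->state, 1UL << bit);
}

static void txq_clear(struct txq *q, int bit)
{
	atomic_fetch_and(&q->state, ~(1UL << bit));
}

static bool txq_test(struct txq *q, int bit)
{
	return atomic_load(&q->state) & (1UL << bit);
}

int main(void)
{
	struct txq q = { .state = 0 };

	txq_set(&q, TXQ_STOP_FULL);
	txq_set(&q, TXQ_STOP_REDIRECT);
	txq_clear(&q, TXQ_STOP_FULL);	/* redirect stop still holds */
	printf("may tx: %d\n", !txq_test(&q, TXQ_STOP_FULL) &&
			       !txq_test(&q, TXQ_STOP_REDIRECT));
	return 0;
}

Clearing one stop reason leaves the other intact, which is exactly what a single plain boolean could not express.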
@@ -1227,6 +1227,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 	INIT_LIST_HEAD(&mvm->add_stream_txqs);
+	spin_lock_init(&mvm->add_stream_lock);

 	init_waitqueue_head(&mvm->rx_sync_waitq);

@@ -1729,7 +1730,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,

 		txq = sta->txq[tid];
 		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-		mvmtxq->stopped = !start;
+		if (start)
+			clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+		else
+			set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);

 		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
 			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
@@ -407,8 +407,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		struct iwl_mvm_txq *mvmtxq =
 			iwl_mvm_txq_from_tid(sta, tid);

-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_lock_bh(&mvm->add_stream_lock);
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}

 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */

@@ -502,8 +505,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 		disable_agg_tids |= BIT(tid);
 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_lock_bh(&mvm->add_stream_lock);
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}

 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */

@@ -716,7 +722,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

 	/* Stop the queue and wait for it to empty */
-	txq->stopped = true;
+	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
 	if (ret) {

@@ -759,7 +765,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,

 out:
 	/* Continue using the queue */
-	txq->stopped = false;
+	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

 	return ret;
 }

@@ -1511,12 +1517,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 		 * a queue in the function itself.
 		 */
 		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+			spin_lock_bh(&mvm->add_stream_lock);
 			list_del_init(&mvmtxq->list);
+			spin_unlock_bh(&mvm->add_stream_lock);
 			continue;
 		}

-		list_del_init(&mvmtxq->list);
+		/* now we're ready, any remaining races/concurrency will be
+		 * handled in iwl_mvm_mac_itxq_xmit()
+		 */
+		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
 		local_bh_disable();
+		spin_lock(&mvm->add_stream_lock);
+		list_del_init(&mvmtxq->list);
+		spin_unlock(&mvm->add_stream_lock);
+
 		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
 		local_bh_enable();
 	}

@@ -1951,8 +1967,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
 		struct iwl_mvm_txq *mvmtxq =
 			iwl_mvm_txq_from_mac80211(sta->txq[i]);

+		spin_lock_bh(&mvm->add_stream_lock);
 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}
 }
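The sta.c hunks above take the wireless/main rule that every manipulation of mvmtxq->list and of the READY bit happens under mvm->add_stream_lock, so the TX wake path, the allocation worker and station teardown cannot race on the pending-queue list. A minimal pthread sketch of the same defer-to-worker idea, with invented names and none of the driver's details:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	bool ready;	/* set once allocation has finished */
	bool pending;	/* on the "needs allocation" list */
};

static pthread_mutex_t add_lock = PTHREAD_MUTEX_INITIALIZER;

/* TX path: transmit if ready, otherwise request allocation exactly once */
static void wake_tx(struct queue *q)
{
	pthread_mutex_lock(&add_lock);
	if (!q->ready && !q->pending)
		q->pending = true;	/* worker will pick it up */
	pthread_mutex_unlock(&add_lock);

	if (q->ready)
		puts("xmit now");
}

/* worker: allocate, then flip both flags under the same lock */
static void worker(struct queue *q)
{
	/* ... allocate the hardware queue here ... */
	pthread_mutex_lock(&add_lock);
	q->pending = false;
	q->ready = true;
	pthread_mutex_unlock(&add_lock);
	puts("xmit deferred frames");
}

int main(void)
{
	struct queue q = { 0 };

	wake_tx(&q);	/* defers */
	worker(&q);	/* allocates and flushes */
	wake_tx(&q);	/* transmits directly */
	return 0;
}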
@@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
 	.can_ext_scan = true,
 };

-static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
 	{ .compatible = "pci11ab,2b42" },
 	{ .compatible = "pci1b4b,2b42" },
 	{ }
@@ -495,7 +495,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
 	{"EXTLAST", NULL, 0, 0xFE},
 };

-static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
 	{ .compatible = "marvell,sd8787" },
 	{ .compatible = "marvell,sd8897" },
 	{ .compatible = "marvell,sd8978" },
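Both mwifiex hunks keep the fix that marks the OF match tables __maybe_unused; in the kernel that macro expands to the compiler's unused attribute, so the tables no longer trigger -Wunused-const-variable in configurations where of_match_ptr() compiles the reference away. A standalone sketch of the same idea (the macro and table here are illustrative, not the mwifiex source):

#include <stdio.h>

#define __maybe_unused __attribute__((__unused__))

struct of_device_id { const char *compatible; };

/* referenced only when HAVE_OF is defined; the attribute silences
 * -Wunused-const-variable in the other configuration */
static const struct of_device_id demo_of_match_table[] __maybe_unused = {
	{ .compatible = "vendor,demo-chip" },
	{ }
};

#ifdef HAVE_OF
#define of_match_ptr(tbl) (tbl)
#else
#define of_match_ptr(tbl) NULL
#endif

int main(void)
{
	printf("match table: %p\n", (const void *)of_match_ptr(demo_of_match_table));
	return 0;
}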
@@ -539,6 +539,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
 	if (ret)
 		return ret;

+	set_bit(MT76_STATE_REGISTERED, &phy->state);
 	phy->dev->phys[phy->band_idx] = phy;

 	return 0;

@@ -549,6 +550,9 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 {
 	struct mt76_dev *dev = phy->dev;

+	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
+		return;
+
 	if (IS_ENABLED(CONFIG_MT76_LEDS))
 		mt76_led_cleanup(phy);
 	mt76_tx_status_check(dev, true);

@@ -719,6 +723,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 		return ret;

 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+	set_bit(MT76_STATE_REGISTERED, &phy->state);
 	sched_set_fifo_low(dev->tx_worker.task);

 	return 0;

@@ -729,6 +734,9 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->hw;

+	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
+		return;
+
 	if (IS_ENABLED(CONFIG_MT76_LEDS))
 		mt76_led_cleanup(&dev->phy);
 	mt76_tx_status_check(dev, true);
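The mt76 hunks record successful registration in a MT76_STATE_REGISTERED bit and make the unregister paths return early when it was never set, so error-path cleanup cannot unregister a phy or device that never completed registration. A small sketch of that idempotent-teardown guard (invented names, not the mt76 API):

#include <stdbool.h>
#include <stdio.h>

struct phy {
	bool registered;
};

static int register_phy(struct phy *phy, bool fail)
{
	if (fail)
		return -1;	/* nothing to undo later */
	phy->registered = true;
	return 0;
}

static void unregister_phy(struct phy *phy)
{
	if (!phy->registered)
		return;		/* never registered (or already torn down) */
	phy->registered = false;
	puts("unregistered");
}

int main(void)
{
	struct phy a = { 0 }, b = { 0 };

	register_phy(&a, false);
	register_phy(&b, true);	/* simulated probe failure */
	unregister_phy(&a);	/* prints */
	unregister_phy(&b);	/* safe no-op */
	return 0;
}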
@@ -402,6 +402,7 @@ struct mt76_tx_cb {

 enum {
 	MT76_STATE_INITIALIZED,
+	MT76_STATE_REGISTERED,
 	MT76_STATE_RUNNING,
 	MT76_STATE_MCU_RUNNING,
 	MT76_SCANNING,
@@ -1221,6 +1221,9 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);

 int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
 {
+	if (!mt76_is_mmio(dev))
+		return 0;
+
 	if (!mtk_wed_device_active(&dev->mmio.wed))
 		return 0;
@@ -383,7 +383,6 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
 	ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
 	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
-	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);

 	hw->max_tx_fragments = 4;

@@ -396,6 +395,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
 	}

 	if (phy->mt76->cap.has_5ghz) {
+		struct ieee80211_sta_vht_cap *vht_cap;
+
+		vht_cap = &phy->mt76->sband_5g.sband.vht_cap;
 		phy->mt76->sband_5g.sband.ht_cap.cap |=
 			IEEE80211_HT_CAP_LDPC_CODING |
 			IEEE80211_HT_CAP_MAX_AMSDU;

@@ -403,19 +405,28 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
 			IEEE80211_HT_MPDU_DENSITY_4;

 		if (is_mt7915(&dev->mt76)) {
-			phy->mt76->sband_5g.sband.vht_cap.cap |=
+			vht_cap->cap |=
 				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
 				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+			if (!dev->dbdc_support)
+				vht_cap->cap |=
+					IEEE80211_VHT_CAP_SHORT_GI_160 |
+					IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+					FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1);
 		} else {
-			phy->mt76->sband_5g.sband.vht_cap.cap |=
+			vht_cap->cap |=
 				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
 				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;

 			/* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
-			phy->mt76->sband_5g.sband.vht_cap.cap |=
+			vht_cap->cap |=
 				IEEE80211_VHT_CAP_SHORT_GI_160 |
 				IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
 		}
+
+		if (!is_mt7915(&dev->mt76) || !dev->dbdc_support)
+			ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 	}

 	mt76_set_stream_caps(phy->mt76, true);

@@ -841,9 +852,13 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
 	int sts = hweight8(phy->mt76->chainmask);
 	u8 c, sts_160 = sts;

-	/* mt7915 doesn't support bw160 */
-	if (is_mt7915(&dev->mt76))
-		sts_160 = 0;
+	/* Can do 1/2 of STS in 160Mhz mode for mt7915 */
+	if (is_mt7915(&dev->mt76)) {
+		if (!dev->dbdc_support)
+			sts_160 /= 2;
+		else
+			sts_160 = 0;
+	}

 #ifdef CONFIG_MAC80211_MESH
 	if (vif == NL80211_IFTYPE_MESH_POINT)

@@ -944,10 +959,15 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
 	int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
 	u16 mcs_map = 0;
 	u16 mcs_map_160 = 0;
-	u8 nss_160 = nss;
+	u8 nss_160;

-	/* Can't do 160MHz with mt7915 */
-	if (is_mt7915(&dev->mt76))
+	if (!is_mt7915(&dev->mt76))
+		nss_160 = nss;
+	else if (!dev->dbdc_support)
+		/* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
+		nss_160 = nss / 2;
+	else
+		/* Can't do 160MHz with mt7915 dbdc */
 		nss_160 = 0;

 	for (i = 0; i < 8; i++) {
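The resolved mt7915 logic above boils down to: full NSS at 160 MHz on non-mt7915 chips, half the streams on mt7915 without DBDC, and no 160 MHz at all on mt7915 with DBDC. A tiny helper restating just that decision, assuming the same three inputs as the hunk (illustrative, not driver code):

#include <assert.h>
#include <stdbool.h>

/* Number of spatial streams usable at 160 MHz, per the hunk above. */
static unsigned int nss_160(unsigned int nss, bool is_mt7915, bool dbdc)
{
	if (!is_mt7915)
		return nss;		/* no 160 MHz restriction */
	if (!dbdc)
		return nss / 2;		/* mt7915: half the streams at 160 MHz */
	return 0;			/* mt7915 + DBDC: no 160 MHz at all */
}

int main(void)
{
	assert(nss_160(4, false, false) == 4);
	assert(nss_160(4, true, false) == 2);
	assert(nss_160(4, true, true) == 0);
	return 0;
}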
@@ -2673,6 +2673,17 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
 	if (!sband)
 		return -EINVAL;

+	if (params->basic_rates) {
+		if (!ieee80211_parse_bitrates(link->conf->chandef.width,
+					      wiphy->bands[sband->band],
+					      params->basic_rates,
+					      params->basic_rates_len,
+					      &link->conf->basic_rates))
+			return -EINVAL;
+		changed |= BSS_CHANGED_BASIC_RATES;
+		ieee80211_check_rate_mask(link);
+	}
+
 	if (params->use_cts_prot >= 0) {
 		link->conf->use_cts_prot = params->use_cts_prot;
 		changed |= BSS_CHANGED_ERP_CTS_PROT;

@@ -2694,16 +2705,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
 		changed |= BSS_CHANGED_ERP_SLOT;
 	}

-	if (params->basic_rates) {
-		ieee80211_parse_bitrates(link->conf->chandef.width,
-					 wiphy->bands[sband->band],
-					 params->basic_rates,
-					 params->basic_rates_len,
-					 &link->conf->basic_rates);
-		changed |= BSS_CHANGED_BASIC_RATES;
-		ieee80211_check_rate_mask(link);
-	}
-
 	if (params->ap_isolate >= 0) {
 		if (params->ap_isolate)
 			sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
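The point of the ieee80211_change_bss() move above is that the basic-rates attribute is now validated first and the whole request fails with -EINVAL when ieee80211_parse_bitrates() rejects it, instead of the old code applying it unconditionally near the end. A generic validate-before-commit sketch of that shape (not the mac80211 API; the parser and structures here are invented):

#include <stdbool.h>
#include <stdio.h>

struct conf { unsigned int basic_rates; };

/* pretend parser: only masks containing the mandatory lowest rate are valid */
static bool parse_rates(unsigned int requested, unsigned int *out)
{
	if (!(requested & 0x1))
		return false;
	*out = requested;
	return true;
}

static int change_bss(struct conf *conf, unsigned int requested)
{
	unsigned int tmp;

	if (!parse_rates(requested, &tmp))
		return -1;	/* reject before touching live state */
	conf->basic_rates = tmp;
	return 0;
}

int main(void)
{
	struct conf c = { .basic_rates = 0x1 };

	if (change_bss(&c, 0x6) < 0)	/* invalid: mandatory bit missing */
		puts("rejected, conf untouched");
	change_bss(&c, 0x7);		/* valid: committed */
	printf("basic_rates=%#x\n", c.basic_rates);
	return 0;
}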
@@ -1322,6 +1322,9 @@ struct ieee80211_local {
 	struct list_head active_txqs[IEEE80211_NUM_ACS];
 	u16 schedule_round[IEEE80211_NUM_ACS];

+	/* serializes ieee80211_handle_wake_tx_queue */
+	spinlock_t handle_wake_tx_queue_lock;
+
 	u16 airtime_flags;
 	u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
 	u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
@@ -802,6 +802,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 	local->aql_threshold = IEEE80211_AQL_THRESHOLD;
 	atomic_set(&local->aql_total_pending_airtime, 0);

+	spin_lock_init(&local->handle_wake_tx_queue_lock);
+
 	INIT_LIST_HEAD(&local->chanctx_list);
 	mutex_init(&local->chanctx_mtx);
@@ -2805,29 +2805,10 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
 	    mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
 		return RX_DROP_MONITOR;

-	/* Frame has reached destination. Don't forward */
-	if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
-		goto rx_accept;
-
-	if (!ifmsh->mshcfg.dot11MeshForwarding) {
-		if (is_multicast_ether_addr(eth->h_dest))
-			goto rx_accept;
-
-		return RX_DROP_MONITOR;
-	}
-
 	/* forward packet */
 	if (sdata->crypto_tx_tailroom_needed_cnt)
 		tailroom = IEEE80211_ENCRYPT_TAILROOM;

-	if (!--mesh_hdr->ttl) {
-		if (multicast)
-			goto rx_accept;
-
-		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
-		return RX_DROP_MONITOR;
-	}
-
 	if (mesh_hdr->flags & MESH_FLAGS_AE) {
 		struct mesh_path *mppath;
 		char *proxied_addr;

@@ -2862,6 +2843,25 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
 		rcu_read_unlock();
 	}

+	/* Frame has reached destination. Don't forward */
+	if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
+		goto rx_accept;
+
+	if (!--mesh_hdr->ttl) {
+		if (multicast)
+			goto rx_accept;
+
+		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+		return RX_DROP_MONITOR;
+	}
+
+	if (!ifmsh->mshcfg.dot11MeshForwarding) {
+		if (is_multicast_ether_addr(eth->h_dest))
+			goto rx_accept;
+
+		return RX_DROP_MONITOR;
+	}
+
 	skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);

 	if (!multicast &&

@@ -2885,6 +2885,9 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta

 		if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
 			return RX_DROP_UNUSABLE;
+
+		if (skb_linearize(fwd_skb))
+			return RX_DROP_UNUSABLE;
 	}

 	fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));

@@ -2899,7 +2902,7 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
 		hdrlen += ETH_ALEN;
 	else
 		fwd_skb->protocol = htons(fwd_skb->len - hdrlen);
-	skb_set_network_header(fwd_skb, hdrlen);
+	skb_set_network_header(fwd_skb, hdrlen + 2);

 	info = IEEE80211_SKB_CB(fwd_skb);
 	memset(info, 0, sizeof(*info));

@@ -2948,7 +2951,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	__le16 fc = hdr->frame_control;
 	struct sk_buff_head frame_list;
-	static ieee80211_rx_result res;
+	ieee80211_rx_result res;
 	struct ethhdr ethhdr;
 	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;

@@ -2982,7 +2985,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
 					  data_offset, true))
 		return RX_DROP_UNUSABLE;

-	if (rx->sta && rx->sta->amsdu_mesh_control < 0) {
+	if (rx->sta->amsdu_mesh_control < 0) {
 		s8 valid = -1;
 		int i;

@@ -3068,7 +3071,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
 		}
 	}

-	if (is_multicast_ether_addr(hdr->addr1))
+	if (is_multicast_ether_addr(hdr->addr1) || !rx->sta)
 		return RX_DROP_UNUSABLE;

 	if (rx->key) {

@@ -3099,7 +3102,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 	struct net_device *dev = sdata->dev;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 	__le16 fc = hdr->frame_control;
-	static ieee80211_rx_result res;
+	ieee80211_rx_result res;
 	bool port_control;
 	int err;
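Two of the rx.c hunks simply drop an accidental static from a local ieee80211_rx_result: a static local is a single slot shared by every call and every concurrent RX path, which is wrong for a per-frame result. A generic illustration of the failure mode (ordinary C, nothing mac80211-specific; staleness shown here is one symptom, data races between concurrent callers are the other):

#include <stdio.h>

static int broken_result(int frame_ok)
{
	static int res;		/* BUG: one shared slot for all calls */

	if (frame_ok)
		res = 1;	/* a bad frame inherits the previous value */
	return res;
}

static int fixed_result(int frame_ok)
{
	int res = 0;		/* per-call, as intended */

	if (frame_ok)
		res = 1;
	return res;
}

int main(void)
{
	broken_result(1);
	printf("broken: bad frame reports %d\n", broken_result(0));	/* 1: stale */
	printf("fixed:  bad frame reports %d\n", fixed_result(0));	/* 0 */
	return 0;
}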
@@ -1264,7 +1264,8 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
 	list_del_rcu(&sta->list);
 	sta->removed = true;

-	drv_sta_pre_rcu_remove(local, sta->sdata, sta);
+	if (sta->uploaded)
+		drv_sta_pre_rcu_remove(local, sta->sdata, sta);

 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
 	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
@@ -314,6 +314,8 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
 	struct ieee80211_txq *queue;

+	spin_lock(&local->handle_wake_tx_queue_lock);
+
 	/* Use ieee80211_next_txq() for airtime fairness accounting */
 	ieee80211_txq_schedule_start(hw, txq->ac);
 	while ((queue = ieee80211_next_txq(hw, txq->ac))) {

@@ -321,6 +323,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
 		ieee80211_return_txq(hw, queue, false);
 	}
 	ieee80211_txq_schedule_end(hw, txq->ac);
+	spin_unlock(&local->handle_wake_tx_queue_lock);
 }
 EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);

@@ -4926,7 +4929,7 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
 				       &eht_cap->eht_cap_elem,
 				       is_ap);
 	return 2 + 1 +
-	       sizeof(he_cap->he_cap_elem) + n +
+	       sizeof(eht_cap->eht_cap_elem) + n +
 	       ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
 				      eht_cap->eht_cap_elem.phy_cap_info);
 	return 0;
@@ -147,6 +147,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 			   struct sta_info *sta, struct sk_buff *skb)
 {
+	const struct ethhdr *eth = (void *)skb->data;
 	struct mac80211_qos_map *qos_map;
 	bool qos;

@@ -154,8 +155,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 		skb_get_hash(skb);

 	/* all mesh/ocb stations are required to support WME */
-	if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-		    sdata->vif.type == NL80211_IFTYPE_OCB))
+	if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
+	     !is_multicast_ether_addr(eth->h_dest)) ||
+	    (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
 		qos = true;
 	else if (sta)
 		qos = sta->sta.wme;
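The resolved ieee80211_select_queue() condition gives QoS handling to mesh frames only when the destination is not multicast, and to OCB only when a station entry exists; everything else falls back to the station's WME capability. A small predicate that loosely restates that truth table (illustrative, not the mac80211 function):

#include <assert.h>
#include <stdbool.h>

enum iftype { IF_STATION, IF_MESH_POINT, IF_OCB };

/* loosely mirrors the resolved condition in ieee80211_select_queue() */
static bool use_qos(enum iftype type, bool has_sta, bool multicast,
		    bool sta_wme)
{
	if ((type == IF_MESH_POINT && !multicast) ||
	    (type == IF_OCB && has_sta))
		return true;
	return has_sta && sta_wme;
}

int main(void)
{
	assert(use_qos(IF_MESH_POINT, false, false, false));	/* unicast mesh: QoS */
	assert(!use_qos(IF_MESH_POINT, false, true, false));	/* multicast mesh: no QoS */
	assert(use_qos(IF_OCB, true, false, false));		/* OCB with sta: QoS */
	assert(!use_qos(IF_STATION, true, false, false));	/* sta without WME */
	return 0;
}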
@@ -462,6 +462,11 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
 	[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
 };

+static struct netlink_range_validation nl80211_punct_bitmap_range = {
+	.min = 0,
+	.max = 0xffff,
+};
+
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 	[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
 	[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },

@@ -805,7 +810,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 	[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
 	[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
 	[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
-	[NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_RANGE(NLA_U8, 0, 0xffff),
+	[NL80211_ATTR_PUNCT_BITMAP] =
+		NLA_POLICY_FULL_RANGE(NLA_U32, &nl80211_punct_bitmap_range),

 	[NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS] = { .type = NLA_U16 },
 	[NL80211_ATTR_HW_TIMESTAMP_ENABLED] = { .type = NLA_FLAG },

@@ -8976,7 +8982,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
 		struct cfg80211_chan_def *chandef;

 		chandef = wdev_chandef(wdev, link_id);
-		if (!chandef)
+		if (!chandef || !chandef->chan)
 			continue;

 		/*

@@ -10874,8 +10880,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,

 static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
 					      const u8 *ssid, int ssid_len,
-					      struct nlattr **attrs,
-					      const u8 **bssid_out)
+					      struct nlattr **attrs)
 {
 	struct ieee80211_channel *chan;
 	struct cfg80211_bss *bss;

@@ -10902,7 +10907,6 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device
 	if (!bss)
 		return ERR_PTR(-ENOENT);

-	*bssid_out = bssid;
 	return bss;
 }

@@ -10912,7 +10916,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 	struct net_device *dev = info->user_ptr[1];
 	struct cfg80211_assoc_request req = {};
 	struct nlattr **attrs = NULL;
-	const u8 *bssid, *ssid;
+	const u8 *ap_addr, *ssid;
 	unsigned int link_id;
 	int err, ssid_len;

@@ -11049,6 +11053,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 			return -EINVAL;

 		req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
+		ap_addr = req.ap_mld_addr;

 		attrs = kzalloc(attrsize, GFP_KERNEL);
 		if (!attrs)

@@ -11074,8 +11079,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 				goto free;
 			}
 			req.links[link_id].bss =
-				nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
-						  &bssid);
+				nl80211_assoc_bss(rdev, ssid, ssid_len, attrs);
 			if (IS_ERR(req.links[link_id].bss)) {
 				err = PTR_ERR(req.links[link_id].bss);
 				req.links[link_id].bss = NULL;

@@ -11126,10 +11130,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 		if (req.link_id >= 0)
 			return -EINVAL;

-		req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
-					    &bssid);
+		req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs);
 		if (IS_ERR(req.bss))
 			return PTR_ERR(req.bss);
+		ap_addr = req.bss->bssid;
 	}

 	err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);

@@ -11142,7 +11146,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 			dev->ieee80211_ptr->conn_owner_nlportid =
 				info->snd_portid;
 			memcpy(dev->ieee80211_ptr->disconnect_bssid,
-			       bssid, ETH_ALEN);
+			       ap_addr, ETH_ALEN);
 		}

 		wdev_unlock(dev->ieee80211_ptr);