rtw88: replace tx tasklet with work queue

Replace the tasklet so we can do tx scheduling in parallel. Since
throughput is delay-sensitive in most cases, we allocate a dedicated,
high-priority workqueue for our needs.

Signed-off-by: Po-Hao Huang <phhuang@realtek.com>
Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
Reviewed-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/20210209070755.23019-5-pkshih@realtek.com
This commit is contained in:
parent
9e2fd29864
commit
fe101716c7
|
@@ -42,7 +42,7 @@ static void rtw_ops_wake_tx_queue(struct ieee80211_hw *hw,
|
|||
list_add_tail(&rtwtxq->list, &rtwdev->txqs);
|
||||
spin_unlock_bh(&rtwdev->txq_lock);
|
||||
|
||||
tasklet_schedule(&rtwdev->tx_tasklet);
|
||||
queue_work(rtwdev->tx_wq, &rtwdev->tx_work);
|
||||
}
|
||||
|
||||
static int rtw_ops_start(struct ieee80211_hw *hw)
|
||||
|
|
|
@@ -1658,7 +1658,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
|
|||
|
||||
timer_setup(&rtwdev->tx_report.purge_timer,
|
||||
rtw_tx_report_purge_timer, 0);
|
||||
tasklet_setup(&rtwdev->tx_tasklet, rtw_tx_tasklet);
|
||||
rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
|
||||
|
||||
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
|
||||
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
|
||||
|
@@ -1670,6 +1670,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
|
|||
INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work,
|
||||
rtw_coex_bt_multi_link_remain_work);
|
||||
INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
|
||||
INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
|
||||
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
|
||||
INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
|
||||
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
|
||||
|
@@ -1736,7 +1737,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
|
|||
if (wow_fw->firmware)
|
||||
release_firmware(wow_fw->firmware);
|
||||
|
||||
tasklet_kill(&rtwdev->tx_tasklet);
|
||||
destroy_workqueue(rtwdev->tx_wq);
|
||||
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
|
||||
skb_queue_purge(&rtwdev->tx_report.queue);
|
||||
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
|
||||
|
|
|
@@ -13,6 +13,7 @@
|
|||
#include <linux/bitfield.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "util.h"
|
||||
|
||||
|
@@ -1772,7 +1773,8 @@ struct rtw_dev {
|
|||
/* used to protect txqs list */
|
||||
spinlock_t txq_lock;
|
||||
struct list_head txqs;
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct workqueue_struct *tx_wq;
|
||||
struct work_struct tx_work;
|
||||
struct work_struct ba_work;
|
||||
|
||||
struct rtw_tx_report tx_report;
|
||||
|
|
|
@@ -592,9 +592,9 @@ static void rtw_txq_push(struct rtw_dev *rtwdev,
|
|||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void rtw_tx_tasklet(struct tasklet_struct *t)
|
||||
void rtw_tx_work(struct work_struct *w)
|
||||
{
|
||||
struct rtw_dev *rtwdev = from_tasklet(rtwdev, t, tx_tasklet);
|
||||
struct rtw_dev *rtwdev = container_of(w, struct rtw_dev, tx_work);
|
||||
struct rtw_txq *rtwtxq, *tmp;
|
||||
|
||||
spin_lock_bh(&rtwdev->txq_lock);
|
||||
|
|
|
@@ -98,7 +98,7 @@ void rtw_tx(struct rtw_dev *rtwdev,
|
|||
struct sk_buff *skb);
|
||||
void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
|
||||
void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
|
||||
void rtw_tx_tasklet(struct tasklet_struct *t);
|
||||
void rtw_tx_work(struct work_struct *w);
|
||||
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
|
||||
struct rtw_tx_pkt_info *pkt_info,
|
||||
struct ieee80211_sta *sta,
|
||||
|
|
Loading…
Reference in New Issue