staging: wfx: fix wfx_flush()

Until now, wfx_flush() flushed the queues of the whole device instead of
only the queues of the intended vif. It sometimes failed with a timeout,
but this error was not reported.

Moreover, if the device was frozen, wfx_flush() did not do anything, which
resulted in a potential warning (and maybe a resource leak) when the
frozen device was unregistered.

Note also that wfx_tx_queues_wait_empty_vif() only existed to work around
the broken behavior of wfx_flush().

This patch fixes wfx_flush() and therefore drops
wfx_tx_queues_wait_empty_vif().

Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Jérôme Pouiller <jerome.pouiller@silabs.com>
Link: https://lore.kernel.org/r/20200401110405.80282-29-Jerome.Pouiller@silabs.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Jérôme Pouiller 2020-04-01 13:04:01 +02:00 committed by Greg Kroah-Hartman
parent 7db856797f
commit 4c6b3837c6
7 changed files with 120 additions and 129 deletions
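
At a glance, the reworked handler walks only the requested ACs and only
considers frames that belong to the target vif. The gist of that per-vif
test, shown here as a condensed, illustrative sketch (the helper name is
made up; the real check lives in __wfx_tx_queue_empty() and
__wfx_tx_queue_drop() in the diff below):

#include <linux/skbuff.h>

/* Illustrative only: decide whether a queued frame is relevant for the vif
 * being flushed. struct hif_msg comes from the driver's HIF definitions;
 * vif_id < 0 means "any vif", which is what wfx_flush() uses when mac80211
 * calls it without a vif. */
static bool frame_matches_vif(struct sk_buff *skb, int vif_id)
{
        struct hif_msg *hif = (struct hif_msg *)skb->data;

        return vif_id < 0 || hif->interface == vif_id;
}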

drivers/staging/wfx/data_tx.c

@@ -524,7 +524,7 @@ static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb)
        rcu_read_unlock();
}

void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
static void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
{
        struct hif_msg *hif = (struct hif_msg *)skb->data;
        struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
@@ -626,4 +626,36 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
        wfx_skb_dtor(wvif->wdev, skb);
}

void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
               u32 queues, bool drop)
{
        struct wfx_dev *wdev = hw->priv;
        struct sk_buff_head dropped;
        struct wfx_queue *queue;
        struct sk_buff *skb;
        int vif_id = -1;
        int i;

        if (vif)
                vif_id = ((struct wfx_vif *)vif->drv_priv)->id;
        skb_queue_head_init(&dropped);
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                if (!(BIT(i) & queues))
                        continue;
                queue = &wdev->tx_queue[i];
                if (drop)
                        wfx_tx_queue_drop(wdev, queue, vif_id, &dropped);
                if (wdev->chip_frozen)
                        continue;
                if (wait_event_timeout(wdev->tx_dequeue,
                                       wfx_tx_queue_empty(wdev, queue, vif_id),
                                       msecs_to_jiffies(1000)) <= 0)
                        dev_warn(wdev->dev, "frames queued while flushing tx queues?");
        }
        wfx_tx_flush(wdev);
        if (wdev->chip_frozen)
                wfx_pending_drop(wdev, &dropped);
        while ((skb = skb_dequeue(&dropped)) != NULL)
                wfx_skb_dtor(wdev, skb);
}
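
For context, wfx_flush() implements mac80211's .flush callback: the stack
invokes it whenever the TX path must be drained (for example before an
interface goes away), and drop indicates that pending frames may simply be
discarded instead of waited for. A minimal sketch of how such a handler is
wired up, assuming the driver's usual ieee80211_ops table (the table name
and the other entries are shown for illustration only):

#include <net/mac80211.h>
#include "sta.h"      /* wfx_start(), wfx_stop() */
#include "data_tx.h"  /* wfx_tx(), wfx_flush() */

/* Sketch only: .flush is the mac80211 callback that wfx_flush() implements;
 * the remaining members stand in for the driver's real ops table. */
static const struct ieee80211_ops wfx_ops = {
        .start = wfx_start,
        .stop  = wfx_stop,
        .tx    = wfx_tx,
        .flush = wfx_flush,
        /* ... other callbacks ... */
};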

drivers/staging/wfx/data_tx.h

@@ -44,7 +44,8 @@ void wfx_tx_policy_upload_work(struct work_struct *work);
void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
            struct sk_buff *skb);
void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg);
void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
               u32 queues, bool drop);

static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
{

drivers/staging/wfx/main.c

@@ -267,7 +267,6 @@ static void wfx_free_common(void *data)
        mutex_destroy(&wdev->rx_stats_lock);
        mutex_destroy(&wdev->conf_mutex);
        wfx_tx_queues_deinit(wdev);
        ieee80211_free_hw(wdev->hw);
}

drivers/staging/wfx/queue.c

@@ -62,92 +62,79 @@ void wfx_tx_lock_flush(struct wfx_dev *wdev)
        wfx_tx_flush(wdev);
}

/* If successful, LOCKS the TX queue! */
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
{
        int i;
        bool done;
        struct wfx_queue *queue;
        struct sk_buff *item;
        struct wfx_dev *wdev = wvif->wdev;
        struct hif_msg *hif;

        if (wvif->wdev->chip_frozen) {
                wfx_tx_lock_flush(wdev);
                wfx_tx_queues_clear(wdev);
                return;
        }
        do {
                done = true;
                wfx_tx_lock_flush(wdev);
                for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
                        queue = &wdev->tx_queue[i];
                        spin_lock_bh(&queue->normal.lock);
                        skb_queue_walk(&queue->normal, item) {
                                hif = (struct hif_msg *)item->data;
                                if (hif->interface == wvif->id)
                                        done = false;
                        }
                        spin_unlock_bh(&queue->normal.lock);
                        spin_lock_bh(&queue->cab.lock);
                        skb_queue_walk(&queue->cab, item) {
                                hif = (struct hif_msg *)item->data;
                                if (hif->interface == wvif->id)
                                        done = false;
                        }
                        spin_unlock_bh(&queue->cab.lock);
                }
                if (!done) {
                        wfx_tx_unlock(wdev);
                        msleep(20);
                }
        } while (!done);
}

static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
                               struct sk_buff_head *gc_list)
{
        struct sk_buff *item;

        while ((item = skb_dequeue(&queue->normal)) != NULL)
                skb_queue_head(gc_list, item);
        while ((item = skb_dequeue(&queue->cab)) != NULL)
                skb_queue_head(gc_list, item);
}

void wfx_tx_queues_clear(struct wfx_dev *wdev)
{
        int i;
        struct sk_buff *item;
        struct sk_buff_head gc_list;

        skb_queue_head_init(&gc_list);
        for (i = 0; i < IEEE80211_NUM_ACS; ++i)
                wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
        wake_up(&wdev->tx_dequeue);
        while ((item = skb_dequeue(&gc_list)) != NULL)
                wfx_skb_dtor(wdev, item);
}

void wfx_tx_queues_init(struct wfx_dev *wdev)
{
        int i;

        memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
        skb_queue_head_init(&wdev->tx_pending);
        init_waitqueue_head(&wdev->tx_dequeue);
        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                skb_queue_head_init(&wdev->tx_queue[i].normal);
                skb_queue_head_init(&wdev->tx_queue[i].cab);
        }
}

void wfx_tx_queues_deinit(struct wfx_dev *wdev)
void wfx_tx_queues_check_empty(struct wfx_dev *wdev)
{
        WARN_ON(!skb_queue_empty(&wdev->tx_pending));
        wfx_tx_queues_clear(wdev);
        int i;

        WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                WARN_ON(atomic_read(&wdev->tx_queue[i].pending_frames));
                WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].normal));
                WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].cab));
        }
}

static bool __wfx_tx_queue_empty(struct wfx_dev *wdev,
                                 struct sk_buff_head *skb_queue, int vif_id)
{
        struct hif_msg *hif_msg;
        struct sk_buff *skb;

        spin_lock_bh(&skb_queue->lock);
        skb_queue_walk(skb_queue, skb) {
                hif_msg = (struct hif_msg *)skb->data;
                if (vif_id < 0 || hif_msg->interface == vif_id) {
                        spin_unlock_bh(&skb_queue->lock);
                        return false;
                }
        }
        spin_unlock_bh(&skb_queue->lock);
        return true;
}

bool wfx_tx_queue_empty(struct wfx_dev *wdev,
                        struct wfx_queue *queue, int vif_id)
{
        return __wfx_tx_queue_empty(wdev, &queue->normal, vif_id) &&
               __wfx_tx_queue_empty(wdev, &queue->cab, vif_id);
}

static void __wfx_tx_queue_drop(struct wfx_dev *wdev,
                                struct sk_buff_head *skb_queue, int vif_id,
                                struct sk_buff_head *dropped)
{
        struct sk_buff *skb, *tmp;
        struct hif_msg *hif_msg;

        spin_lock_bh(&skb_queue->lock);
        skb_queue_walk_safe(skb_queue, skb, tmp) {
                hif_msg = (struct hif_msg *)skb->data;
                if (vif_id < 0 || hif_msg->interface == vif_id) {
                        __skb_unlink(skb, skb_queue);
                        skb_queue_head(dropped, skb);
                }
        }
        spin_unlock_bh(&skb_queue->lock);
}

void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
                       int vif_id, struct sk_buff_head *dropped)
{
        __wfx_tx_queue_drop(wdev, &queue->cab, vif_id, dropped);
        __wfx_tx_queue_drop(wdev, &queue->normal, vif_id, dropped);
        wake_up(&wdev->tx_dequeue);
}

void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
@@ -174,6 +161,22 @@ int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
        return 0;
}

void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
        struct wfx_queue *queue;
        struct sk_buff *skb;

        WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
             __func__);
        while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
                queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
                WARN_ON(skb_get_queue_mapping(skb) > 3);
                WARN_ON(!atomic_read(&queue->pending_frames));
                atomic_dec(&queue->pending_frames);
                skb_queue_head(dropped, skb);
        }
}

struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
        struct wfx_queue *queue;
@@ -249,17 +252,6 @@ bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
        return false;
}

bool wfx_tx_queues_empty(struct wfx_dev *wdev)
{
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; i++)
                if (!skb_queue_empty_lockless(&wdev->tx_queue[i].normal) ||
                    !skb_queue_empty_lockless(&wdev->tx_queue[i].cab))
                        return false;
        return true;
}

static bool wfx_handle_tx_data(struct wfx_dev *wdev, struct sk_buff *skb)
{
        struct hif_req_tx *req = wfx_skb_txreq(skb);
@@ -364,7 +356,6 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
                if (!skb)
                        return NULL;
                skb_queue_tail(&wdev->tx_pending, skb);
                if (wfx_tx_queues_empty(wdev))
                        wake_up(&wdev->tx_dequeue);
                // FIXME: is it useful?
                if (wfx_handle_tx_data(wdev, skb))

drivers/staging/wfx/queue.h

@@ -31,16 +31,18 @@ void wfx_tx_flush(struct wfx_dev *wdev);
void wfx_tx_lock_flush(struct wfx_dev *wdev);
void wfx_tx_queues_init(struct wfx_dev *wdev);
void wfx_tx_queues_deinit(struct wfx_dev *wdev);
void wfx_tx_queues_clear(struct wfx_dev *wdev);
bool wfx_tx_queues_empty(struct wfx_dev *wdev);
void wfx_tx_queues_check_empty(struct wfx_dev *wdev);
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif);
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb);
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);

bool wfx_tx_queue_empty(struct wfx_dev *wdev, struct wfx_queue *queue,
                        int vif_id);
void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
                       int vif_id, struct sk_buff_head *dropped);

struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped);
int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
                                          struct sk_buff *skb);

drivers/staging/wfx/sta.c

@@ -318,29 +318,6 @@ int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        return 0;
}

static int __wfx_flush(struct wfx_dev *wdev, bool drop)
{
        for (;;) {
                if (drop)
                        wfx_tx_queues_clear(wdev);
                if (wait_event_timeout(wdev->tx_dequeue,
                                       wfx_tx_queues_empty(wdev),
                                       2 * HZ) <= 0)
                        return -ETIMEDOUT;
                wfx_tx_flush(wdev);
                if (wfx_tx_queues_empty(wdev))
                        return 0;
                dev_warn(wdev->dev, "frames queued while flushing tx queues");
        }
}

void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
               u32 queues, bool drop)
{
        // FIXME: only flush requested vif and queues
        __wfx_flush(hw->priv, drop);
}

/* WSM callbacks */

static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
@@ -843,10 +820,8 @@ static int wfx_update_tim(struct wfx_vif *wvif)
        skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
                                       &tim_offset, &tim_length);
        if (!skb) {
                __wfx_flush(wvif->wdev, true);
        if (!skb)
                return -ENOENT;
        }
        tim_ptr = skb->data + tim_offset;
        if (tim_offset && tim_length >= 6) {
@@ -1062,8 +1037,6 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
        }
        wvif->state = WFX_STATE_PASSIVE;
        wfx_tx_queues_wait_empty_vif(wvif);
        wfx_tx_unlock(wdev);

        /* FIXME: In add to reset MAC address, try to reset interface */
        hif_set_macaddr(wvif, NULL);
@@ -1097,10 +1070,5 @@ void wfx_stop(struct ieee80211_hw *hw)
{
        struct wfx_dev *wdev = hw->priv;

        wfx_tx_lock_flush(wdev);
        mutex_lock(&wdev->conf_mutex);
        wfx_tx_queues_clear(wdev);
        mutex_unlock(&wdev->conf_mutex);
        wfx_tx_unlock(wdev);
        WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
        wfx_tx_queues_check_empty(wdev);
}

drivers/staging/wfx/sta.h

@@ -54,8 +54,6 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
               u32 queues, bool drop);
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                u16 queue, const struct ieee80211_tx_queue_params *params);
void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,