ath9k: fix queue stop/start based on the number of pending frames
Because there is a limited number of tx buffers available, once a queue has been filled to a certain point, ath9k needs to stop accepting new frames from mac80211. To prevent a full WMM queue from stopping another queue with fewer frames, this patch limits the number of queued frames to a quarter of the total available tx buffers, minus some frames reserved for other purposes (e.g. beacons).

Because tx buffers are also reserved for frames while they are staged in software queues, the hardware queue depth alone cannot be used for this check, so this patch stores a reference to the tx queue in struct ath_buf and keeps track of the total number of pending frames per queue.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
commit 84642d6bdd
parent 764580f577
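With ATH_TXBUF = 512 and ATH_TXBUF_RESERVE = 5, the new limit works out to 512 / 4 - 5 = 123 pending frames per WMM queue. Below is a minimal standalone sketch of the accounting this patch introduces; the names example_txq, example_tx_enqueue and example_tx_complete are illustrative only (not the driver's), and the locking and mac80211 stop/wake calls are stubbed out as comments.

/*
 * Illustrative sketch of the per-queue pending-frame accounting.
 * Not the driver code itself; see the diff below for the real changes.
 */
#define ATH_TXBUF         512
#define ATH_TXBUF_RESERVE 5
#define ATH_MAX_QDEPTH    (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)   /* 123 */

struct example_txq {
	int pending_frames;  /* frames handed to the driver, not yet completed */
	int stopped;         /* mirrors the mac80211 queue state */
};

/* Called when mac80211 hands the driver a new frame for this queue. */
static void example_tx_enqueue(struct example_txq *txq)
{
	if (++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		/* ieee80211_stop_queue() in the real driver */
		txq->stopped = 1;
	}
}

/* Called when a frame's tx completion is processed. */
static void example_tx_complete(struct example_txq *txq)
{
	txq->pending_frames--;
	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
		/* ieee80211_wake_queue() in the real driver */
		txq->stopped = 0;
	}
}

In the actual patch the increment happens in ath_tx_start(), the decrement in ath_tx_complete_buf(), and the wake check is performed separately in ath_wake_mac80211_queue(); the sketch merely folds these into two functions to show the counting scheme.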
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -136,6 +136,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
 #define ATH_MAX_ANTENNA         3
 #define ATH_RXBUF               512
 #define ATH_TXBUF               512
+#define ATH_TXBUF_RESERVE       5
+#define ATH_MAX_QDEPTH          (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
 #define ATH_TXMAXTRY            13
 #define ATH_MGT_TXMAXTRY        4
@@ -204,6 +206,7 @@ struct ath_txq {
 	struct list_head txq_fifo_pending;
 	u8 txq_headidx;
 	u8 txq_tailidx;
+	int pending_frames;
 };
 
 struct ath_atx_ac {
@@ -241,6 +244,7 @@ struct ath_buf {
 	struct ath_buf_state bf_state;
 	dma_addr_t bf_dmacontext;
 	struct ath_wiphy *aphy;
+	struct ath_txq *txq;
 };
 
 struct ath_atx_tid {
@@ -330,7 +334,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
 int ath_tx_init(struct ath_softc *sc, int nbufs);
 void ath_tx_cleanup(struct ath_softc *sc);
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *q);
 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1025,6 +1025,7 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 	struct ath_tx_control txctl;
 	int padpos, padsize;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	int qnum;
 
 	if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
 		ath_print(common, ATH_DBG_XMIT,
@@ -1097,11 +1098,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 		memmove(skb->data, skb->data + padsize, padpos);
 	}
 
-	/* Check if a tx queue is available */
-
-	txctl.txq = ath_test_get_txq(sc, skb);
-	if (!txctl.txq)
-		goto exit;
+	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+	txctl.txq = &sc->tx.txq[qnum];
 
 	ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -984,32 +984,6 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 	return qnum;
 }
 
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
-{
-	struct ath_txq *txq = NULL;
-	u16 skb_queue = skb_get_queue_mapping(skb);
-	int qnum;
-
-	qnum = ath_get_hal_qnum(skb_queue, sc);
-	txq = &sc->tx.txq[qnum];
-
-	spin_lock_bh(&txq->axq_lock);
-
-	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
-		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
-			  "TX queue: %d is full, depth: %d\n",
-			  qnum, txq->axq_depth);
-		ath_mac80211_stop_queue(sc, skb_queue);
-		txq->stopped = 1;
-		spin_unlock_bh(&txq->axq_lock);
-		return NULL;
-	}
-
-	spin_unlock_bh(&txq->axq_lock);
-
-	return txq;
-}
-
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *qinfo)
 {
@@ -1809,6 +1783,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_txq *txq = txctl->txq;
 	struct ath_buf *bf;
 	int r;
 
@@ -1818,10 +1793,16 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		return -1;
 	}
 
+	bf->txq = txctl->txq;
+	spin_lock_bh(&bf->txq->axq_lock);
+	if (++bf->txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+		txq->stopped = 1;
+	}
+	spin_unlock_bh(&bf->txq->axq_lock);
+
 	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
 	if (unlikely(r)) {
-		struct ath_txq *txq = txctl->txq;
-
 		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
 
 		/* upon ath_tx_processq() this TX queue will be resumed, we
@@ -1829,7 +1810,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		 * we will at least have to run TX completionon one buffer
 		 * on the queue */
 		spin_lock_bh(&txq->axq_lock);
-		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
+		if (!txq->stopped && txq->axq_depth > 1) {
 			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
 			txq->stopped = 1;
 		}
@@ -1970,6 +1951,13 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 		tx_flags |= ATH_TX_XRETRY;
 	}
 
+	if (bf->txq) {
+		spin_lock_bh(&bf->txq->axq_lock);
+		bf->txq->pending_frames--;
+		spin_unlock_bh(&bf->txq->axq_lock);
+		bf->txq = NULL;
+	}
+
 	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
 	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
 	ath_debug_stat_tx(sc, txq, bf, ts);
@@ -2058,8 +2046,7 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
 	int qnum;
 
 	spin_lock_bh(&txq->axq_lock);
-	if (txq->stopped &&
-	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
+	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
 		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
 		if (qnum != -1) {
 			ath_mac80211_start_queue(sc, qnum);