net/smc: introduce a delay
The number of outstanding work requests is limited. If all work requests are in use, tx processing is postponed to another scheduling of the tx worker. Switch to a delayed worker so that there is a gap for tx completion queue events to arrive before the next retry. Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
bfbedfd383
commit
18e537cd58
|
@ -149,7 +149,7 @@ struct smc_connection {
|
||||||
atomic_t sndbuf_space; /* remaining space in sndbuf */
|
atomic_t sndbuf_space; /* remaining space in sndbuf */
|
||||||
u16 tx_cdc_seq; /* sequence # for CDC send */
|
u16 tx_cdc_seq; /* sequence # for CDC send */
|
||||||
spinlock_t send_lock; /* protect wr_sends */
|
spinlock_t send_lock; /* protect wr_sends */
|
||||||
struct work_struct tx_work; /* retry of smc_cdc_msg_send */
|
struct delayed_work tx_work; /* retry of smc_cdc_msg_send */
|
||||||
|
|
||||||
struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl.
|
struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl.
|
||||||
* .prod cf. TCP rcv_nxt
|
* .prod cf. TCP rcv_nxt
|
||||||
|
|
|
@ -208,7 +208,7 @@ again:
|
||||||
case SMC_ACTIVE:
|
case SMC_ACTIVE:
|
||||||
smc_close_stream_wait(smc, timeout);
|
smc_close_stream_wait(smc, timeout);
|
||||||
release_sock(sk);
|
release_sock(sk);
|
||||||
cancel_work_sync(&conn->tx_work);
|
cancel_delayed_work_sync(&conn->tx_work);
|
||||||
lock_sock(sk);
|
lock_sock(sk);
|
||||||
if (sk->sk_state == SMC_ACTIVE) {
|
if (sk->sk_state == SMC_ACTIVE) {
|
||||||
/* send close request */
|
/* send close request */
|
||||||
|
@ -234,7 +234,7 @@ again:
|
||||||
if (!smc_cdc_rxed_any_close(conn))
|
if (!smc_cdc_rxed_any_close(conn))
|
||||||
smc_close_stream_wait(smc, timeout);
|
smc_close_stream_wait(smc, timeout);
|
||||||
release_sock(sk);
|
release_sock(sk);
|
||||||
cancel_work_sync(&conn->tx_work);
|
cancel_delayed_work_sync(&conn->tx_work);
|
||||||
lock_sock(sk);
|
lock_sock(sk);
|
||||||
if (sk->sk_err != ECONNABORTED) {
|
if (sk->sk_err != ECONNABORTED) {
|
||||||
/* confirm close from peer */
|
/* confirm close from peer */
|
||||||
|
@ -263,7 +263,9 @@ again:
|
||||||
/* peer sending PeerConnectionClosed will cause transition */
|
/* peer sending PeerConnectionClosed will cause transition */
|
||||||
break;
|
break;
|
||||||
case SMC_PROCESSABORT:
|
case SMC_PROCESSABORT:
|
||||||
cancel_work_sync(&conn->tx_work);
|
release_sock(sk);
|
||||||
|
cancel_delayed_work_sync(&conn->tx_work);
|
||||||
|
lock_sock(sk);
|
||||||
smc_close_abort(conn);
|
smc_close_abort(conn);
|
||||||
sk->sk_state = SMC_CLOSED;
|
sk->sk_state = SMC_CLOSED;
|
||||||
smc_close_wait_tx_pends(smc);
|
smc_close_wait_tx_pends(smc);
|
||||||
|
@ -425,7 +427,7 @@ again:
|
||||||
case SMC_ACTIVE:
|
case SMC_ACTIVE:
|
||||||
smc_close_stream_wait(smc, timeout);
|
smc_close_stream_wait(smc, timeout);
|
||||||
release_sock(sk);
|
release_sock(sk);
|
||||||
cancel_work_sync(&conn->tx_work);
|
cancel_delayed_work_sync(&conn->tx_work);
|
||||||
lock_sock(sk);
|
lock_sock(sk);
|
||||||
/* send close wr request */
|
/* send close wr request */
|
||||||
rc = smc_close_wr(conn);
|
rc = smc_close_wr(conn);
|
||||||
|
@ -439,7 +441,7 @@ again:
|
||||||
if (!smc_cdc_rxed_any_close(conn))
|
if (!smc_cdc_rxed_any_close(conn))
|
||||||
smc_close_stream_wait(smc, timeout);
|
smc_close_stream_wait(smc, timeout);
|
||||||
release_sock(sk);
|
release_sock(sk);
|
||||||
cancel_work_sync(&conn->tx_work);
|
cancel_delayed_work_sync(&conn->tx_work);
|
||||||
lock_sock(sk);
|
lock_sock(sk);
|
||||||
/* confirm close from peer */
|
/* confirm close from peer */
|
||||||
rc = smc_close_wr(conn);
|
rc = smc_close_wr(conn);
|
||||||
|
|
|
@ -24,6 +24,8 @@
|
||||||
#include "smc_cdc.h"
|
#include "smc_cdc.h"
|
||||||
#include "smc_tx.h"
|
#include "smc_tx.h"
|
||||||
|
|
||||||
|
#define SMC_TX_WORK_DELAY HZ
|
||||||
|
|
||||||
/***************************** sndbuf producer *******************************/
|
/***************************** sndbuf producer *******************************/
|
||||||
|
|
||||||
/* callback implementation for sk.sk_write_space()
|
/* callback implementation for sk.sk_write_space()
|
||||||
|
@ -406,7 +408,8 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
rc = 0;
|
rc = 0;
|
||||||
schedule_work(&conn->tx_work);
|
schedule_delayed_work(&conn->tx_work,
|
||||||
|
SMC_TX_WORK_DELAY);
|
||||||
}
|
}
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
@ -430,7 +433,7 @@ out_unlock:
|
||||||
*/
|
*/
|
||||||
static void smc_tx_work(struct work_struct *work)
|
static void smc_tx_work(struct work_struct *work)
|
||||||
{
|
{
|
||||||
struct smc_connection *conn = container_of(work,
|
struct smc_connection *conn = container_of(to_delayed_work(work),
|
||||||
struct smc_connection,
|
struct smc_connection,
|
||||||
tx_work);
|
tx_work);
|
||||||
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
|
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
|
||||||
|
@ -468,7 +471,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
|
||||||
if (!rc)
|
if (!rc)
|
||||||
rc = smc_cdc_msg_send(conn, wr_buf, pend);
|
rc = smc_cdc_msg_send(conn, wr_buf, pend);
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
schedule_work(&conn->tx_work);
|
schedule_delayed_work(&conn->tx_work,
|
||||||
|
SMC_TX_WORK_DELAY);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
smc_curs_write(&conn->rx_curs_confirmed,
|
smc_curs_write(&conn->rx_curs_confirmed,
|
||||||
|
@ -487,6 +491,6 @@ void smc_tx_consumer_update(struct smc_connection *conn)
|
||||||
void smc_tx_init(struct smc_sock *smc)
|
void smc_tx_init(struct smc_sock *smc)
|
||||||
{
|
{
|
||||||
smc->sk.sk_write_space = smc_tx_write_space;
|
smc->sk.sk_write_space = smc_tx_write_space;
|
||||||
INIT_WORK(&smc->conn.tx_work, smc_tx_work);
|
INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
|
||||||
spin_lock_init(&smc->conn.send_lock);
|
spin_lock_init(&smc->conn.send_lock);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue