Merge branch 'net-smc-next'

commit 3f9b2d2864

Ursula Braun says:

====================
net/smc: patches 2019-02-12

here are patches for SMC:
* patches 1 and 3 optimize SMC-R tx logic
* patch 2 is a cleanup without functional change
* patch 4 optimizes rx logic
* patches 5 and 6 improve robustness in link group and IB event handling
* patch 7 establishes Karsten Graul as another SMC maintainer
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
MAINTAINERS
@@ -13760,6 +13760,7 @@ F:	drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:	Ursula Braun <ubraun@linux.ibm.com>
+M:	Karsten Graul <kgraul@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
net/smc/smc_cdc.c
@@ -105,8 +105,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 			    &conn->local_tx_ctrl, conn);
 	smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
 	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
-	if (!rc)
+	if (!rc) {
 		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
+		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
+	}
 
 	return rc;
 }
@@ -194,6 +196,7 @@ int smcd_cdc_msg_send(struct smc_connection *conn)
 	if (rc)
 		return rc;
 	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
+	conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
 	/* Calculate transmitted data and increment free send buffer space */
 	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
 			     &conn->tx_curs_sent);
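These two hunks share one idea: a consumer-cursor update request from the peer is treated as satisfied only once the CDC message carrying the cursor has actually been sent, at which point the request flag is cleared so no redundant update follows. A minimal userspace model of that pattern (all names and values invented for illustration, not the kernel code):

/* Clear the "peer wants a cursor update" flag only after the cursor
 * really went out; plain C model, all names hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct conn {
	int cons_cursor;	/* what we consumed locally */
	int confirmed_cursor;	/* last value the peer was told */
	bool cons_curs_upd_req;	/* peer asked for a cursor update */
};

/* stand-in for smc_wr_tx_send(); pretend the send succeeded */
static int send_cursor(struct conn *c)
{
	printf("CDC msg: cons cursor %d\n", c->cons_cursor);
	return 0;
}

static int msg_send(struct conn *c)
{
	int rc = send_cursor(c);

	if (!rc) {
		c->confirmed_cursor = c->cons_cursor;
		c->cons_curs_upd_req = false;	/* request satisfied */
	}
	return rc;
}

int main(void)
{
	struct conn c = { .cons_cursor = 42, .cons_curs_upd_req = true };

	msg_send(&c);
	/* a second trigger now sees no pending request and stays quiet */
	if (!c.cons_curs_upd_req)
		printf("no redundant update scheduled\n");
	return 0;
}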
@@ -270,26 +273,18 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		smp_mb__after_atomic();
 		smc->sk.sk_data_ready(&smc->sk);
 	} else {
-		if (conn->local_rx_ctrl.prod_flags.write_blocked ||
-		    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
-		    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
-			if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
-				conn->urg_state = SMC_URG_NOTYET;
-			/* force immediate tx of current consumer cursor, but
-			 * under send_lock to guarantee arrival in seqno-order
-			 */
-			if (smc->sk.sk_state != SMC_INIT)
-				smc_tx_sndbuf_nonempty(conn);
-		}
+		if (conn->local_rx_ctrl.prod_flags.write_blocked)
+			smc->sk.sk_data_ready(&smc->sk);
+		if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
+			conn->urg_state = SMC_URG_NOTYET;
 	}
 
 	/* piggy backed tx info */
 	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
-	if (diff_cons && smc_tx_prepared_sends(conn)) {
+	if ((diff_cons && smc_tx_prepared_sends(conn)) ||
+	    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
+	    conn->local_rx_ctrl.prod_flags.urg_data_pending)
 		smc_tx_sndbuf_nonempty(conn);
-		/* trigger socket release if connection closed */
-		smc_close_wake_tx_prepared(smc);
-	}
 
 	if (diff_cons && conn->urg_tx_pend &&
 	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
 		/* urg data confirmed by peer, indicate we're ready for more */
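The rx action above now funnels every reason to tell the peer something, a pending consumer-cursor update request or urgent data as well as freshly confirmed consumed data, through the single smc_tx_sndbuf_nonempty() call; the close wake-up moves into that function (see the smc_tx.c hunk below). A stand-alone model of the consolidated predicate (names hypothetical):

/* One predicate decides whether a CDC message must go out. */
#include <stdbool.h>
#include <stdio.h>

struct flags {
	bool write_blocked;
	bool cons_curs_upd_req;
	bool urg_data_pending;
};

static bool must_send(bool data_confirmed, bool sends_prepared,
		      const struct flags *f)
{
	/* mirrors: (diff_cons && smc_tx_prepared_sends(conn)) ||
	 *          cons_curs_upd_req || urg_data_pending
	 */
	return (data_confirmed && sends_prepared) ||
	       f->cons_curs_upd_req || f->urg_data_pending;
}

int main(void)
{
	struct flags f = { .cons_curs_upd_req = true };

	/* even with no prepared sends, the pending request forces a send */
	printf("send? %s\n", must_send(false, false, &f) ? "yes" : "no");
	return 0;
}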
net/smc/smc_core.c
@@ -160,8 +160,6 @@ static void smc_lgr_free_work(struct work_struct *work)
 	bool conns;
 
 	spin_lock_bh(&smc_lgr_list.lock);
-	if (list_empty(&lgr->list))
-		goto free;
 	read_lock_bh(&lgr->conns_lock);
 	conns = RB_EMPTY_ROOT(&lgr->conns_all);
 	read_unlock_bh(&lgr->conns_lock);
@@ -169,8 +167,8 @@ static void smc_lgr_free_work(struct work_struct *work)
 		spin_unlock_bh(&smc_lgr_list.lock);
 		return;
 	}
-	list_del_init(&lgr->list); /* remove from smc_lgr_list */
-free:
+	if (!list_empty(&lgr->list))
+		list_del_init(&lgr->list); /* remove from smc_lgr_list */
 	spin_unlock_bh(&smc_lgr_list.lock);
 
 	if (!lgr->is_smcd && !lgr->terminating) {
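Instead of bailing out early when the link group is already unlinked, the worker now proceeds and unlinks only if still needed. list_del_init() leaves the entry pointing at itself, so list_empty() on the entry reports whether it is still linked and a second unlink attempt (for instance from a racing termination path) is a safe no-op. A userspace sketch of that idempotent-unlink pattern:

/* Idempotent unlink with list_del_init(): userspace re-implementation
 * of the kernel list primitives this change relies on.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

/* unlink and re-point the entry at itself, so list_empty(&entry)
 * becomes true and a later unlink attempt is skipped safely
 */
static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);
}

int main(void)
{
	struct list_head lgr_list, lgr;

	list_init(&lgr_list);
	list_add(&lgr, &lgr_list);

	if (!list_empty(&lgr))	/* first path unlinks */
		list_del_init(&lgr);
	if (!list_empty(&lgr))	/* second path sees it gone: no-op */
		list_del_init(&lgr);
	printf("lgr unlinked exactly once\n");
	return 0;
}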
net/smc/smc_ib.c
@@ -257,13 +257,21 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
 	smcibdev = container_of(handler, struct smc_ib_device, event_handler);
 
 	switch (ibevent->event) {
-	case IB_EVENT_PORT_ERR:
 	case IB_EVENT_DEVICE_FATAL:
-	case IB_EVENT_PORT_ACTIVE:
-		port_idx = ibevent->element.port_num - 1;
-		set_bit(port_idx, &smcibdev->port_event_mask);
+		/* terminate all ports on device */
+		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++)
+			set_bit(port_idx, &smcibdev->port_event_mask);
 		schedule_work(&smcibdev->port_event_work);
 		break;
+	case IB_EVENT_PORT_ERR:
+	case IB_EVENT_PORT_ACTIVE:
+	case IB_EVENT_GID_CHANGE:
+		port_idx = ibevent->element.port_num - 1;
+		if (port_idx < SMC_MAX_PORTS) {
+			set_bit(port_idx, &smcibdev->port_event_mask);
+			schedule_work(&smcibdev->port_event_work);
+		}
+		break;
 	default:
 		break;
 	}
@@ -294,13 +302,13 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
 	u8 port_idx;
 
 	switch (ibevent->event) {
-	case IB_EVENT_DEVICE_FATAL:
-	case IB_EVENT_GID_CHANGE:
-	case IB_EVENT_PORT_ERR:
+	case IB_EVENT_QP_FATAL:
 	case IB_EVENT_QP_ACCESS_ERR:
 		port_idx = ibevent->element.qp->port - 1;
-		set_bit(port_idx, &smcibdev->port_event_mask);
-		schedule_work(&smcibdev->port_event_work);
+		if (port_idx < SMC_MAX_PORTS) {
+			set_bit(port_idx, &smcibdev->port_event_mask);
+			schedule_work(&smcibdev->port_event_work);
+		}
 		break;
 	default:
 		break;
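Both handlers now derive port_idx from an event field and range-check it before using it as a bit number. port_event_mask is a single unsigned long, so without the guard a bogus port number from the device would set a bit past the end of the mask, i.e. scribble on whatever follows it. A small userspace model of why the guard matters (sizes and values invented):

/* Out-of-range bit numbers corrupt neighbouring memory; the added
 * port_idx guard prevents that. Userspace sketch.
 */
#include <stdio.h>

#define SMC_MAX_PORTS 2

struct dev {
	unsigned long port_event_mask;
	unsigned long neighbour;	/* would be hit by a wild bit */
};

static void handle_event(struct dev *d, unsigned int port_num)
{
	unsigned int port_idx = port_num - 1;

	if (port_idx < SMC_MAX_PORTS) {	/* the added guard */
		d->port_event_mask |= 1UL << port_idx;
		/* schedule_work(&d->port_event_work) would follow here */
	}
	/* without the guard, port_num = 65 would shift past bit 63
	 * (undefined behaviour) or, with the kernel's set_bit() on a
	 * bitmap, touch memory after port_event_mask
	 */
}

int main(void)
{
	struct dev d = { 0 };

	handle_event(&d, 65);	/* bogus port: silently ignored */
	printf("mask=%lx neighbour=%lx\n", d.port_event_mask, d.neighbour);
	return 0;
}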
net/smc/smc_tx.c
@@ -24,10 +24,11 @@
 #include "smc.h"
 #include "smc_wr.h"
 #include "smc_cdc.h"
+#include "smc_close.h"
 #include "smc_ism.h"
 #include "smc_tx.h"
 
-#define SMC_TX_WORK_DELAY		HZ
+#define SMC_TX_WORK_DELAY		0
 #define SMC_TX_CORK_DELAY		(HZ >> 2)	/* 250 ms */
 
 /***************************** sndbuf producer *******************************/
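SMC_TX_WORK_DELAY is a delay in jiffies handed to delayed-work scheduling, so the change from HZ to 0 turns a fixed one-second deferral of the tx worker into an immediate queue. A runnable illustration of the unit conversion (the HZ value is just an assumed CONFIG_HZ; names hypothetical):

/* Delayed work runs "delay jiffies" in the future: 0 means "as soon
 * as the worker can run", HZ meant "one second from now".
 */
#include <stdio.h>

#define HZ 250			/* an assumed CONFIG_HZ choice */
#define SMC_TX_WORK_DELAY 0	/* was: HZ */

static void schedule_delayed(const char *what, unsigned long delay_jiffies)
{
	printf("%s will run in %lu ms\n", what,
	       delay_jiffies * 1000UL / HZ);
}

int main(void)
{
	schedule_delayed("tx_work (old)", HZ);			/* 1000 ms */
	schedule_delayed("tx_work (new)", SMC_TX_WORK_DELAY);	/*    0 ms */
	return 0;
}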
@@ -554,6 +555,12 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
 	else
 		rc = smcr_tx_sndbuf_nonempty(conn);
 
+	if (!rc) {
+		/* trigger socket release if connection is closing */
+		struct smc_sock *smc = container_of(conn, struct smc_sock,
+						    conn);
+		smc_close_wake_tx_prepared(smc);
+	}
 	return rc;
 }
 
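The wake-up of close waiters, removed from the rx path above, now lives here: after a successful send, the socket owning the connection is recovered via container_of and its close waiters are woken. container_of works because conn is embedded in struct smc_sock, so the outer object sits at a fixed offset before it. A runnable miniature (struct layouts invented for illustration):

/* container_of in miniature: recover the enclosing socket from a
 * pointer to its embedded connection member.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct smc_connection { int sndbuf_bytes; };

struct smc_sock {
	int sk_state;
	struct smc_connection conn;	/* embedded, not a pointer */
};

static void wake_close_waiters(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	printf("waking socket in state %d\n", smc->sk_state);
}

int main(void)
{
	struct smc_sock sock = { .sk_state = 7 };

	wake_close_waiters(&sock.conn);
	return 0;
}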
@@ -610,9 +617,6 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
 					      SMC_TX_WORK_DELAY);
 			return;
 		}
-		smc_curs_copy(&conn->rx_curs_confirmed,
-			      &conn->local_tx_ctrl.cons, conn);
-		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
 	}
 	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
 	    !atomic_read(&conn->bytes_to_rcv))
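This removal is the counterpart of the first two smc_cdc.c hunks: rx_curs_confirmed and the request flag are no longer updated here on the optimistic assumption that the CDC message will go out, but only in the send functions once it actually has. A toy model of the failure the move avoids (names and error value invented):

/* Confirming a cursor before the send succeeds can leave the peer
 * behind forever if the send fails; confirming afterwards cannot.
 */
#include <stdbool.h>
#include <stdio.h>

static bool send_ok;	/* pretend transport state */

static int cdc_send(int cursor)
{
	if (!send_ok)
		return -1;	/* transport failure */
	printf("CDC msg: cursor %d\n", cursor);
	return 0;
}

int main(void)
{
	int cons = 10, confirmed = 0;

	send_ok = false;

	/* old scheme: confirm first, then (fail to) send */
	confirmed = cons;
	cdc_send(cons);
	printf("old: confirmed=%d but peer heard nothing\n", confirmed);

	/* new scheme: confirm only on successful send */
	confirmed = 0;
	if (cdc_send(cons) == 0)
		confirmed = cons;
	printf("new: confirmed=%d, update still pending\n", confirmed);
	return 0;
}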