hv_netvsc: Use the xmit_more skb flag to optimize signaling the host
Based on the information given to this driver (via the xmit_more skb
flag), we can defer signaling the host if more packets are on the way.
This will help make the host more efficient since it can potentially
process a larger batch of packets. Implement this optimization.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9eea922264
commit 82fa3c776e
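In transmit-path terms, the optimization the message describes is: queue the packet into the outbound ring, but only ring the host's doorbell when the stack says no more packets are immediately following. A minimal standalone C sketch of that pattern (the ring type and the queue_packet()/signal_host() helpers are hypothetical stand-ins, not this driver's API):

#include <stdbool.h>

/* Hypothetical outbound ring; for illustration only. */
struct ring {
	int pending;	/* packets written but not yet signaled to the host */
};

static void queue_packet(struct ring *r, const void *pkt)
{
	(void)pkt;	/* a real driver would copy pkt into the ring here */
	r->pending++;
}

static void signal_host(struct ring *r)
{
	r->pending = 0;	/* stand-in for the hypervisor doorbell/interrupt */
}

/*
 * xmit_more is the stack's hint that another packet follows immediately.
 * Deferring the signal lets the host drain a batch of packets on one
 * notification instead of taking one notification per packet.
 */
static void xmit_one(struct ring *r, const void *pkt, bool xmit_more)
{
	queue_packet(r, pkt);
	if (!xmit_more)
		signal_host(r);	/* end of batch: notify the host now */
}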
@@ -743,6 +743,7 @@ static inline int netvsc_send_pkt(
 	u64 req_id;
 	int ret;
 	struct hv_page_buffer *pgbuf;
+	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
 
 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (packet->is_data_pkt) {
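The new ring_avail local snapshots the outbound ring's free space once, up front, so the same value drives both the "force a signal if we may stop the queue" clamp and the low-watermark check in the next hunk. As an illustration of what such a percentage helper computes, a sketch under assumed field names (not the real Hyper-V ring structures):

/* Assumed ring bookkeeping; the real hv_ring_buffer_info differs. */
struct ring_info {
	unsigned int write_avail;	/* free bytes in the outbound ring */
	unsigned int datasize;		/* total usable bytes in the ring  */
};

/* Free space as a percentage of the whole ring, for watermark tests. */
static unsigned int ringbuf_avail_percent(const struct ring_info *ri)
{
	return ri->write_avail * 100u / ri->datasize;
}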
@@ -769,32 +770,42 @@ static inline int netvsc_send_pkt(
 	if (out_channel->rescind)
 		return -ENODEV;
 
+	/*
+	 * It is possible that once we successfully place this packet
+	 * on the ringbuffer, we may stop the queue. In that case, we want
+	 * to notify the host independent of the xmit_more flag. We don't
+	 * need to be precise here; in the worst case we may signal the host
+	 * unnecessarily.
+	 */
+	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+		packet->xmit_more = false;
+
 	if (packet->page_buf_cnt) {
 		pgbuf = packet->cp_partial ? packet->page_buf +
 			packet->rmsg_pgcnt : packet->page_buf;
-		ret = vmbus_sendpacket_pagebuffer(out_channel,
-						  pgbuf,
-						  packet->page_buf_cnt,
-						  &nvmsg,
-						  sizeof(struct nvsp_message),
-						  req_id);
+		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+						      pgbuf,
+						      packet->page_buf_cnt,
+						      &nvmsg,
+						      sizeof(struct nvsp_message),
+						      req_id,
+						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+						      !packet->xmit_more);
 	} else {
-		ret = vmbus_sendpacket(
-				out_channel, &nvmsg,
-				sizeof(struct nvsp_message),
-				req_id,
-				VM_PKT_DATA_INBAND,
-				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+					   sizeof(struct nvsp_message),
+					   req_id,
+					   VM_PKT_DATA_INBAND,
+					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+					   !packet->xmit_more);
 	}
 
 	if (ret == 0) {
 		atomic_inc(&net_device->num_outstanding_sends);
 		atomic_inc(&net_device->queue_sends[q_idx]);
 
-		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
-			RING_AVAIL_PERCENT_LOWATER) {
-			netif_tx_stop_queue(netdev_get_tx_queue(
-					    ndev, q_idx));
+		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
 
 			if (atomic_read(&net_device->
 				queue_sends[q_idx]) < 1)
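Both sends now go through the _ctl variants, whose trailing bool ("kick the host?") receives !packet->xmit_more: only the last packet of a batch signals, except when the clamp above has already cleared xmit_more because the queue may be about to stop. A simplified sketch of how such a flag plausibly gates the signal inside a send helper (stand-in types and helpers, not the real VMBus implementation):

#include <stdbool.h>
#include <stddef.h>

struct channel { int unused; };	/* stand-in for the real VMBus channel */

static int ring_write(struct channel *c, const void *buf, size_t len)
{
	(void)c; (void)buf; (void)len;
	return 0;	/* pretend the packet fit into the outbound ring */
}

static void ring_signal(struct channel *c)
{
	(void)c;	/* stand-in for the hypervisor doorbell */
}

/*
 * "_ctl"-style send: the caller decides whether this write should also
 * signal the host. netvsc passes !packet->xmit_more, so intermediate
 * packets of a batch are written silently and only the final one (or a
 * forced flush) rings the doorbell.
 */
static int sendpacket_ctl(struct channel *c, const void *buf, size_t len,
			  bool kick_q)
{
	int ret = ring_write(c, buf, len);

	if (ret == 0 && kick_q)
		ring_signal(c);
	return ret;
}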