skb: report completion status for zero copy skbs
Even if an skb is marked for zero copy, the net core might still decide to copy it later, which is somewhat slower than a copy in user context: besides copying the data, we need to pin and unpin the pages.

Add a parameter that reports such cases through the zero copy callback: if this happens a lot, the device can take it into account and switch to copying in user context.

This patch updates all users but ignores the passed value for now: it will be used by follow-up patches.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e19d6763cc (parent 202975b4c5)
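As a rough illustration of how a device might use the reported status, here is a minimal userspace C model (not kernel code): the callback counts completions where the core had to copy, and a hypothetical policy helper decides whether zero copy is still worth requesting. Apart from struct ubuf_info and the bool argument, every name and threshold below is invented for this sketch.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the kernel structure after this patch (userspace model). */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* Hypothetical per-device counters; not taken from the kernel sources. */
struct dev_stats {
	unsigned long tx_completions;
	unsigned long tx_copied;	/* completions where the core had to copy */
};

static void dev_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	struct dev_stats *stats = ubuf->ctx;

	stats->tx_completions++;
	if (!zerocopy_success)
		stats->tx_copied++;
}

/* Hypothetical policy: keep requesting zero copy only while most
 * completions really were zero copy. */
static bool dev_should_use_zerocopy(const struct dev_stats *stats)
{
	if (stats->tx_completions < 16)
		return true;
	return stats->tx_copied * 2 < stats->tx_completions;
}

int main(void)
{
	struct dev_stats stats = { 0 };
	struct ubuf_info ubuf = { .callback = dev_zerocopy_callback, .ctx = &stats };

	/* Simulate completions: true = real zero copy, false = the core copied. */
	ubuf.callback(&ubuf, true);
	ubuf.callback(&ubuf, false);
	ubuf.callback(&ubuf, false);

	printf("keep using zero copy: %s\n",
	       dev_should_use_zerocopy(&stats) ? "yes" : "no");
	return 0;
}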
drivers/vhost/vhost.c
@@ -1600,7 +1600,7 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
 	kfree(ubufs);
 }
 
-void vhost_zerocopy_callback(struct ubuf_info *ubuf)
+void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool status)
 {
 	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
 	struct vhost_virtqueue *vq = ubufs->vq;
drivers/vhost/vhost.h
@@ -191,7 +191,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		    unsigned int log_num, u64 len);
-void vhost_zerocopy_callback(struct ubuf_info *);
+void vhost_zerocopy_callback(struct ubuf_info *, bool);
 int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
 
 #define vq_err(vq, fmt, ...) do { \
include/linux/skbuff.h
@@ -235,11 +235,13 @@ enum {
 /*
  * The callback notifies userspace to release buffers when skb DMA is done in
  * lower device, the skb last reference should be 0 when calling this.
+ * The zerocopy_success argument is true if zero copy transmit occurred,
+ * false on data copy or out of memory error caused by data copy attempt.
  * The ctx field is used to track device context.
  * The desc field is used to track userspace buffer index.
  */
 struct ubuf_info {
-	void (*callback)(struct ubuf_info *);
+	void (*callback)(struct ubuf_info *, bool zerocopy_success);
 	void *ctx;
 	unsigned long desc;
 };
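For any other user of ubuf_info, adapting to the new signature would look roughly like the kernel-style sketch below; only struct ubuf_info and the bool argument come from this patch, while my_tx_ctx and its counters are invented for illustration.

#include <linux/skbuff.h>
#include <linux/atomic.h>

/* Hypothetical per-device context; not part of this patch. */
struct my_tx_ctx {
	atomic_t pending;	/* outstanding zero copy buffers */
	atomic_t copied;	/* completions where the core copied the data */
};

static void my_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	struct my_tx_ctx *ctx = ubuf->ctx;

	if (!zerocopy_success)
		atomic_inc(&ctx->copied);
	atomic_dec(&ctx->pending);
}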
net/core/skbuff.c
@@ -519,7 +519,7 @@ static void skb_release_data(struct sk_buff *skb)
 
 			uarg = skb_shinfo(skb)->destructor_arg;
 			if (uarg->callback)
-				uarg->callback(uarg);
+				uarg->callback(uarg, true);
 		}
 
 		if (skb_has_frag_list(skb))
@@ -797,7 +797,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	for (i = 0; i < num_frags; i++)
 		skb_frag_unref(skb, i);
 
-	uarg->callback(uarg);
+	uarg->callback(uarg, false);
 
 	/* skb frags point to kernel buffers */
 	for (i = num_frags - 1; i >= 0; i--) {
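The hunk above is the case this patch is really about: when the core must give up on zero copy, it copies the user frags into kernel buffers and now reports that with status false. A minimal userspace C model of that fallback, with all names other than struct ubuf_info invented for this sketch, might look like:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Mirror of the kernel structure after this patch (userspace model). */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* Simplified stand-in for an skb fragment. */
struct frag {
	void *data;	/* user memory before the copy, kernel-owned after */
	size_t len;
};

/*
 * Model of the fallback: copy every fragment into a buffer the stack owns,
 * then tell the sender the transmit was *not* zero copy so it can release
 * its pages (and perhaps stop asking for zero copy).
 */
static int copy_ubufs(struct frag *frags, int nfrags, struct ubuf_info *uarg)
{
	int i;

	for (i = 0; i < nfrags; i++) {
		void *copy = malloc(frags[i].len);

		if (!copy)
			return -1;	/* sketch: real code would also unwind here */
		memcpy(copy, frags[i].data, frags[i].len);
		frags[i].data = copy;
	}

	uarg->callback(uarg, false);	/* data was copied, not sent zero copy */
	return 0;
}

static void report(struct ubuf_info *ubuf, bool zerocopy_success)
{
	(void)ubuf;
	(void)zerocopy_success;	/* a real device would record the copy here */
}

int main(void)
{
	char user_buf[] = "payload";
	struct frag f = { .data = user_buf, .len = sizeof(user_buf) };
	struct ubuf_info uarg = { .callback = report, .ctx = NULL };

	return copy_ubufs(&f, 1, &uarg) ? 1 : 0;
}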