virtio_ring: introduce dma sync api for virtqueue
These APIs have been introduced: * virtqueue_dma_need_sync * virtqueue_dma_sync_single_range_for_cpu * virtqueue_dma_sync_single_range_for_device These APIs can be used together with the premapped mechanism to synchronize a premapped DMA address. Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Message-Id: <20230810123057.43407-12-xuanzhuo@linux.alibaba.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
b6253b4e21
commit
8bd2f71054
|
@ -3175,4 +3175,80 @@ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
|
EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* virtqueue_dma_need_sync - check a dma address needs sync
|
||||||
|
* @_vq: the struct virtqueue we're talking about.
|
||||||
|
* @addr: DMA address
|
||||||
|
*
|
||||||
|
* Check if the dma address mapped by the virtqueue_dma_map_* APIs needs to be
|
||||||
|
* synchronized
|
||||||
|
*
|
||||||
|
* return bool
|
||||||
|
*/
|
||||||
|
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
|
||||||
|
{
|
||||||
|
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||||
|
|
||||||
|
if (!vq->use_dma_api)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return dma_need_sync(vring_dma_dev(vq), addr);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
|
||||||
|
* @_vq: the struct virtqueue we're talking about.
|
||||||
|
* @addr: DMA address
|
||||||
|
* @offset: DMA address offset
|
||||||
|
* @size: buf size for sync
|
||||||
|
* @dir: DMA direction
|
||||||
|
*
|
||||||
|
* Before calling this function, use virtqueue_dma_need_sync() to confirm that
|
||||||
|
* the DMA address really needs to be synchronized
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
|
||||||
|
dma_addr_t addr,
|
||||||
|
unsigned long offset, size_t size,
|
||||||
|
enum dma_data_direction dir)
|
||||||
|
{
|
||||||
|
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||||
|
struct device *dev = vring_dma_dev(vq);
|
||||||
|
|
||||||
|
if (!vq->use_dma_api)
|
||||||
|
return;
|
||||||
|
|
||||||
|
dma_sync_single_range_for_cpu(dev, addr, offset, size,
|
||||||
|
DMA_BIDIRECTIONAL);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* virtqueue_dma_sync_single_range_for_device - dma sync for device
|
||||||
|
* @_vq: the struct virtqueue we're talking about.
|
||||||
|
* @addr: DMA address
|
||||||
|
* @offset: DMA address offset
|
||||||
|
* @size: buf size for sync
|
||||||
|
* @dir: DMA direction
|
||||||
|
*
|
||||||
|
* Before calling this function, use virtqueue_dma_need_sync() to confirm that
|
||||||
|
* the DMA address really needs to be synchronized
|
||||||
|
*/
|
||||||
|
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
|
||||||
|
dma_addr_t addr,
|
||||||
|
unsigned long offset, size_t size,
|
||||||
|
enum dma_data_direction dir)
|
||||||
|
{
|
||||||
|
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||||
|
struct device *dev = vring_dma_dev(vq);
|
||||||
|
|
||||||
|
if (!vq->use_dma_api)
|
||||||
|
return;
|
||||||
|
|
||||||
|
dma_sync_single_range_for_device(dev, addr, offset, size,
|
||||||
|
DMA_BIDIRECTIONAL);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
|
||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
|
|
|
@ -220,4 +220,12 @@ void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
|
||||||
size_t size, enum dma_data_direction dir,
|
size_t size, enum dma_data_direction dir,
|
||||||
unsigned long attrs);
|
unsigned long attrs);
|
||||||
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
|
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
|
||||||
|
|
||||||
|
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
|
||||||
|
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
|
||||||
|
unsigned long offset, size_t size,
|
||||||
|
enum dma_data_direction dir);
|
||||||
|
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
|
||||||
|
unsigned long offset, size_t size,
|
||||||
|
enum dma_data_direction dir);
|
||||||
#endif /* _LINUX_VIRTIO_H */
|
#endif /* _LINUX_VIRTIO_H */
|
||||||
|
|
Loading…
Reference in New Issue