virtio_ring: introduce dma sync api for virtqueue

These APIs have been introduced:

* virtqueue_dma_need_sync
* virtqueue_dma_sync_single_range_for_cpu
* virtqueue_dma_sync_single_range_for_device

These APIs can be used together with the premapped mechanism to sync DMA
addresses that the driver mapped itself, as sketched below.
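
For illustration, a minimal sketch of the intended flow, assuming a queue
that was already switched to premapped mode (via virtqueue_set_dma_premapped(),
introduced earlier in this series); the buffer, length, and function names
are illustrative, and the add/get steps are elided:

    #include <linux/virtio.h>
    #include <linux/dma-mapping.h>

    /* Illustrative only: virtqueue_add_*()/virtqueue_get_buf() and error
     * unwinding are elided.
     */
    static void demo_premapped_rx(struct virtqueue *vq, void *buf, size_t len)
    {
            dma_addr_t addr;

            /* In premapped mode the driver maps the buffer itself. */
            addr = virtqueue_dma_map_single_attrs(vq, buf, len,
                                                  DMA_FROM_DEVICE, 0);
            if (virtqueue_dma_mapping_error(vq, addr))
                    return;

            /* ... expose addr to the device and wait for completion ... */

            /* Sync back for the CPU, but only if this mapping needs it. */
            if (virtqueue_dma_need_sync(vq, addr))
                    virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
                                                            DMA_FROM_DEVICE);

            /* ... read the data; before reposting the same mapping ... */
            if (virtqueue_dma_need_sync(vq, addr))
                    virtqueue_dma_sync_single_range_for_device(vq, addr, 0, len,
                                                               DMA_FROM_DEVICE);
    }

The virtqueue_dma_need_sync() check lets drivers skip the sync calls
entirely on platforms where the mapping is already coherent.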

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Message-Id: <20230810123057.43407-12-xuanzhuo@linux.alibaba.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Xuan Zhuo 2023-08-10 20:30:56 +08:00 committed by Michael S. Tsirkin
parent b6253b4e21
commit 8bd2f71054
2 changed files with 84 additions and 0 deletions

drivers/virtio/virtio_ring.c

@@ -3175,4 +3175,80 @@ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
}
EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);

/**
 * virtqueue_dma_need_sync - check whether a DMA address needs sync
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 *
 * Check whether the DMA address mapped by the virtqueue_dma_map_* APIs needs
 * to be synchronized.
 *
 * Return: bool
 */
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!vq->use_dma_api)
                return false;

        return dma_need_sync(vring_dma_dev(vq), addr);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);

/**
 * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized.
 */
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
                                             dma_addr_t addr,
                                             unsigned long offset, size_t size,
                                             enum dma_data_direction dir)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct device *dev = vring_dma_dev(vq);

        if (!vq->use_dma_api)
                return;

        /* Honor the caller's direction rather than a hardcoded
         * DMA_BIDIRECTIONAL; dir was previously accepted but unused.
         */
        dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);

/**
 * virtqueue_dma_sync_single_range_for_device - dma sync for device
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized.
 */
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
                                                dma_addr_t addr,
                                                unsigned long offset, size_t size,
                                                enum dma_data_direction dir)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct device *dev = vring_dma_dev(vq);

        if (!vq->use_dma_api)
                return;

        /* As above, pass the caller-supplied direction through. */
        dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);

MODULE_LICENSE("GPL");
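
Two points worth noting about the helpers above: all three return early when
the virtqueue does not use the DMA API, so callers may invoke them
unconditionally; and because the driver now controls syncing explicitly, it
can pair these helpers with DMA_ATTR_SKIP_CPU_SYNC to avoid redundant syncs
at map/unmap time. A sketch of that pairing, with illustrative helper names:

    #include <linux/virtio.h>
    #include <linux/dma-mapping.h>

    /* Map a long-lived buffer without the implicit sync; the driver will
     * call the virtqueue sync helpers itself at the points that need it.
     */
    static dma_addr_t demo_map_nosync(struct virtqueue *vq, void *buf,
                                      size_t len)
    {
            return virtqueue_dma_map_single_attrs(vq, buf, len,
                                                  DMA_FROM_DEVICE,
                                                  DMA_ATTR_SKIP_CPU_SYNC);
    }

    static void demo_unmap_nosync(struct virtqueue *vq, dma_addr_t addr,
                                  size_t len)
    {
            /* Skip the sync here too; the last explicit sync already ran. */
            virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_FROM_DEVICE,
                                             DMA_ATTR_SKIP_CPU_SYNC);
    }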

include/linux/virtio.h

@@ -220,4 +220,12 @@ void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
                                      size_t size, enum dma_data_direction dir,
                                      unsigned long attrs);
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
                                             unsigned long offset, size_t size,
                                             enum dma_data_direction dir);
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
                                                unsigned long offset, size_t size,
                                                enum dma_data_direction dir);

#endif /* _LINUX_VIRTIO_H */
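
Since whether a mapping needs syncing is fixed for the mapping's lifetime, a
driver can query virtqueue_dma_need_sync() once at map time and cache the
result for its hot path. A sketch under that assumption; struct demo_rx_buf
and the helper names are invented for illustration:

    #include <linux/virtio.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Illustrative per-buffer state; a real driver would embed this in its
     * own ring bookkeeping.
     */
    struct demo_rx_buf {
            void *va;
            dma_addr_t addr;
            u32 len;
            bool need_sync;   /* cached virtqueue_dma_need_sync() result */
    };

    static int demo_rx_buf_map(struct virtqueue *vq, struct demo_rx_buf *b)
    {
            b->addr = virtqueue_dma_map_single_attrs(vq, b->va, b->len,
                                                     DMA_FROM_DEVICE, 0);
            if (virtqueue_dma_mapping_error(vq, b->addr))
                    return -ENOMEM;

            /* Query once at map time; the answer does not change for the
             * lifetime of the mapping.
             */
            b->need_sync = virtqueue_dma_need_sync(vq, b->addr);
            return 0;
    }

    /* Hot path: sync only when the cached flag says the mapping needs it. */
    static void demo_rx_buf_done(struct virtqueue *vq, struct demo_rx_buf *b,
                                 u32 received)
    {
            if (b->need_sync)
                    virtqueue_dma_sync_single_range_for_cpu(vq, b->addr, 0,
                                                            received,
                                                            DMA_FROM_DEVICE);
    }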