vfio: Refactor dma APIs for emulated devices

Use the group helpers instead of open-coding group-related logic in
these APIs. This prepares for moving the group-specific code out of
vfio_main.c.

Link: https://lore.kernel.org/r/20221201145535.589687-10-yi.l.liu@intel.com
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Tested-by: Lixiao Yang <lixiao.yang@intel.com>
Tested-by: Yu He <yu.he@intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Author:    Yi Liu  2022-11-10 18:57:01 -08:00
Committer: Jason Gunthorpe
Commit:    8da7a0e79f
Parent:    1334e47ee7
3 changed files with 43 additions and 34 deletions
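
In short, callers such as vfio_pin_pages() no longer open-code the
device->group->container and device->group->iommu_group lookups; the new
vfio_device_container_*() wrappers take the vfio_device and derive both
internally. Below is a minimal, self-contained userspace sketch of that
pattern; the fake_* structs and helpers are illustrative stand-ins, not the
real kernel types.

#include <stdio.h>

/* Simplified stand-ins for the vfio_container / iommu_group / vfio_group /
 * vfio_device relationships (illustrative only). */
struct fake_container { int id; };
struct fake_iommu_group { int id; };

struct fake_group {
        struct fake_container *container;
        struct fake_iommu_group *iommu_group;
};

struct fake_device {
        struct fake_group *group;
};

/* Old style: every caller passes the container and iommu_group explicitly,
 * so group internals leak into each call site. */
static int old_container_pin_pages(struct fake_container *container,
                                   struct fake_iommu_group *iommu_group,
                                   unsigned long iova, int npage)
{
        printf("pin %d page(s) at 0x%lx via container %d, iommu group %d\n",
               npage, iova, container->id, iommu_group->id);
        return npage;
}

/* New style: take only the device and derive the container and iommu_group
 * from device->group inside the helper, as the refactor does. */
static int device_container_pin_pages(struct fake_device *device,
                                      unsigned long iova, int npage)
{
        struct fake_container *container = device->group->container;
        struct fake_iommu_group *iommu_group = device->group->iommu_group;

        return old_container_pin_pages(container, iommu_group, iova, npage);
}

int main(void)
{
        struct fake_container container = { .id = 1 };
        struct fake_iommu_group iommu_group = { .id = 7 };
        struct fake_group group = { &container, &iommu_group };
        struct fake_device device = { &group };

        /* Before: the caller digs the group internals out itself. */
        old_container_pin_pages(device.group->container,
                                device.group->iommu_group, 0x1000, 1);

        /* After: the caller only needs the device. */
        device_container_pin_pages(&device, 0x1000, 1);
        return 0;
}

The effect is visible in the vfio_pin_pages(), vfio_unpin_pages() and
vfio_dma_rw() hunks below: each collapses to a single device-based call,
which is what allows the group-specific plumbing to move out of
vfio_main.c in a later patch.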

@@ -540,10 +540,12 @@ void vfio_group_unuse_container(struct vfio_group *group)
         fput(group->opened_file);
 }
 
-int vfio_container_pin_pages(struct vfio_container *container,
-                             struct iommu_group *iommu_group, dma_addr_t iova,
-                             int npage, int prot, struct page **pages)
+int vfio_device_container_pin_pages(struct vfio_device *device,
+                                    dma_addr_t iova, int npage,
+                                    int prot, struct page **pages)
 {
+        struct vfio_container *container = device->group->container;
+        struct iommu_group *iommu_group = device->group->iommu_group;
         struct vfio_iommu_driver *driver = container->iommu_driver;
 
         if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
@@ -555,9 +557,11 @@ int vfio_container_pin_pages(struct vfio_container *container,
                         npage, prot, pages);
 }
 
-void vfio_container_unpin_pages(struct vfio_container *container,
-                                dma_addr_t iova, int npage)
+void vfio_device_container_unpin_pages(struct vfio_device *device,
+                                       dma_addr_t iova, int npage)
 {
+        struct vfio_container *container = device->group->container;
+
         if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
                 return;
 
@@ -565,9 +569,11 @@ void vfio_container_unpin_pages(struct vfio_container *container,
                         npage);
 }
 
-int vfio_container_dma_rw(struct vfio_container *container, dma_addr_t iova,
-                          void *data, size_t len, bool write)
+int vfio_device_container_dma_rw(struct vfio_device *device,
+                                 dma_addr_t iova, void *data,
+                                 size_t len, bool write)
 {
+        struct vfio_container *container = device->group->container;
         struct vfio_iommu_driver *driver = container->iommu_driver;
 
         if (unlikely(!driver || !driver->ops->dma_rw))

@@ -122,13 +122,14 @@ int vfio_container_attach_group(struct vfio_container *container,
 void vfio_group_detach_container(struct vfio_group *group);
 void vfio_device_container_register(struct vfio_device *device);
 void vfio_device_container_unregister(struct vfio_device *device);
-int vfio_container_pin_pages(struct vfio_container *container,
-                             struct iommu_group *iommu_group, dma_addr_t iova,
-                             int npage, int prot, struct page **pages);
-void vfio_container_unpin_pages(struct vfio_container *container,
-                                dma_addr_t iova, int npage);
-int vfio_container_dma_rw(struct vfio_container *container, dma_addr_t iova,
-                          void *data, size_t len, bool write);
+int vfio_device_container_pin_pages(struct vfio_device *device,
+                                    dma_addr_t iova, int npage,
+                                    int prot, struct page **pages);
+void vfio_device_container_unpin_pages(struct vfio_device *device,
+                                       dma_addr_t iova, int npage);
+int vfio_device_container_dma_rw(struct vfio_device *device,
+                                 dma_addr_t iova, void *data,
+                                 size_t len, bool write);
 
 int __init vfio_container_init(void);
 void vfio_container_cleanup(void);
@@ -166,22 +167,21 @@ static inline void vfio_device_container_unregister(struct vfio_device *device)
 {
 }
 
-static inline int vfio_container_pin_pages(struct vfio_container *container,
-                                           struct iommu_group *iommu_group,
-                                           dma_addr_t iova, int npage, int prot,
-                                           struct page **pages)
+static inline int vfio_device_container_pin_pages(struct vfio_device *device,
+                                                  dma_addr_t iova, int npage,
+                                                  int prot, struct page **pages)
 {
         return -EOPNOTSUPP;
 }
 
-static inline void vfio_container_unpin_pages(struct vfio_container *container,
-                                              dma_addr_t iova, int npage)
+static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
+                                                     dma_addr_t iova, int npage)
 {
 }
 
-static inline int vfio_container_dma_rw(struct vfio_container *container,
-                                        dma_addr_t iova, void *data, size_t len,
-                                        bool write)
+static inline int vfio_device_container_dma_rw(struct vfio_device *device,
+                                               dma_addr_t iova, void *data,
+                                               size_t len, bool write)
 {
         return -EOPNOTSUPP;
 }

@@ -1938,6 +1938,11 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
 }
 EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
 
+static bool vfio_device_has_container(struct vfio_device *device)
+{
+        return device->group->container;
+}
+
 /*
  * Pin contiguous user pages and return their associated host pages for local
  * domain only.
@@ -1950,7 +1955,7 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
  * Return error or number of pages pinned.
  *
  * A driver may only call this function if the vfio_device was created
- * by vfio_register_emulated_iommu_dev() due to vfio_container_pin_pages().
+ * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages().
  */
 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
                    int npage, int prot, struct page **pages)
@@ -1958,10 +1963,9 @@ int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
         /* group->container cannot change while a vfio device is open */
         if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
                 return -EINVAL;
-        if (device->group->container)
-                return vfio_container_pin_pages(device->group->container,
-                                                device->group->iommu_group,
-                                                iova, npage, prot, pages);
+        if (vfio_device_has_container(device))
+                return vfio_device_container_pin_pages(device, iova,
+                                                       npage, prot, pages);
         if (device->iommufd_access) {
                 int ret;
 
@@ -1997,9 +2001,8 @@ void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
         if (WARN_ON(!vfio_assert_device_open(device)))
                 return;
 
-        if (device->group->container) {
-                vfio_container_unpin_pages(device->group->container, iova,
-                                           npage);
+        if (vfio_device_has_container(device)) {
+                vfio_device_container_unpin_pages(device, iova, npage);
                 return;
         }
         if (device->iommufd_access) {
@@ -2036,9 +2039,9 @@ int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
         if (!data || len <= 0 || !vfio_assert_device_open(device))
                 return -EINVAL;
 
-        if (device->group->container)
-                return vfio_container_dma_rw(device->group->container, iova,
-                                             data, len, write);
+        if (vfio_device_has_container(device))
+                return vfio_device_container_dma_rw(device, iova,
+                                                    data, len, write);
         if (device->iommufd_access) {
                 unsigned int flags = 0;
 