VFIO fixes for v5.10-rc3
- Remove code by using existing helper (Zenghui Yu)
- fsl-mc copy-user return and underflow fixes (Dan Carpenter)
- fsl-mc static function declaration (Diana Craciun)
- Fix ioeventfd sleeping under spinlock (Alex Williamson)
- Fix pm reference count leak in vfio-platform (Zhang Qilong)
- Allow opening IGD device w/o OpRegion support (Fred Gao)
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.14 (GNU/Linux)

iQIcBAABAgAGBQJfpGRRAAoJECObm247sIsi6coP/iIb7fpQM0RvzBN/ghV2aq7L
Zr+0JbmUf1XrANrz4z0bK0PAF9XBuvXlvu/6nOR8reok491hY+hDd8fvb2GTkaoi
MxWP92IcJ6EASSKTpP4VmOqnYWklYaUBCn+VO2x3pOeCZfFKBGv4T7jtZuLlq0or
6y0pwrk5jch/WLoIDpgGQykTVWva6G6FyEfxNxd9LtkSYKu+94jfG304tUj2xQMO
r5hYHXY3sgyYez1i4CNYxR/IuGdu01TiryQeNX6ldEpbHwRYc7jkxANv32RPDM9g
JUDXuK7HkGuN2Fdwuk8CrR18ANryMC0c/tDwJH+no8GSgiv7F4Y22C8Td7jQapKY
gmx5JT8BaSnfgm0biAoQ7XIzee0MqQXaYx4K3MU250JnOBsQ1gqc9F0qVeY29Yx6
01zMh4RzAriCXucX/qPKDrYA48fHYXJDOestTMGKSNqnQK73eBw+c/a1VzOVIiAe
euA6fDR/NOlxd6ZLx8lPMGWjXRoJ8+mTy9XzGESnsvQl4Dr2MZKZ/SSYvhC4Ilzg
9x9FE+eBCruL+mJXbVTZHqHjqbNYN8/kgUb5dBw03oeedoLJDM68NKOCmcguls/a
iu5dD3etu0AVDzBAAcbxQ02v1ybTzk0dqGamhtB8Hem/CIXYHWcv0eXWds0gD8KJ
ZrvKyjkaW/G9u+6b/1CG
=QqHP
-----END PGP SIGNATURE-----

Merge tag 'vfio-v5.10-rc3' of git://github.com/awilliam/linux-vfio

Pull VFIO fixes from Alex Williamson:

 - Remove code by using existing helper (Zenghui Yu)

 - fsl-mc copy-user return and underflow fixes (Dan Carpenter)

 - fsl-mc static function declaration (Diana Craciun)

 - Fix ioeventfd sleeping under spinlock (Alex Williamson)

 - Fix pm reference count leak in vfio-platform (Zhang Qilong)

 - Allow opening IGD device w/o OpRegion support (Fred Gao)

* tag 'vfio-v5.10-rc3' of git://github.com/awilliam/linux-vfio:
  vfio/pci: Bypass IGD init in case of -ENODEV
  vfio: platform: fix reference leak in vfio_platform_open
  vfio/pci: Implement ioeventfd thread handler for contended memory lock
  vfio/fsl-mc: Make vfio_fsl_mc_irqs_allocate static
  vfio/fsl-mc: prevent underflow in vfio_fsl_mc_mmap()
  vfio/fsl-mc: return -EFAULT if copy_to_user() fails
  vfio/type1: Use the new helper to find vfio_group
commit 1669ecf9c8
@@ -248,7 +248,9 @@ static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
 		info.size = vdev->regions[info.index].size;
 		info.flags = vdev->regions[info.index].flags;
 
-		return copy_to_user((void __user *)arg, &info, minsz);
+		if (copy_to_user((void __user *)arg, &info, minsz))
+			return -EFAULT;
+		return 0;
 	}
 	case VFIO_DEVICE_GET_IRQ_INFO:
 	{
@@ -267,7 +269,9 @@ static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
 		info.flags = VFIO_IRQ_INFO_EVENTFD;
 		info.count = 1;
 
-		return copy_to_user((void __user *)arg, &info, minsz);
+		if (copy_to_user((void __user *)arg, &info, minsz))
+			return -EFAULT;
+		return 0;
 	}
 	case VFIO_DEVICE_SET_IRQS:
 	{
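Note: both the GET_REGION_INFO and GET_IRQ_INFO paths previously returned the result of copy_to_user() directly. copy_to_user() returns the number of bytes it could not copy (0 on success), not a negative errno, so a faulting copy made the ioctl return a positive value instead of -EFAULT. A minimal sketch of the pattern, using a hypothetical demo_info structure rather than the driver's own types:

/* Minimal sketch with hypothetical names; not the driver code itself. */
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_info {
	__u64 size;
	__u64 flags;
};

static long demo_copy_info(void __user *arg, const struct demo_info *info,
			   unsigned long minsz)
{
	/*
	 * Wrong: copy_to_user() returns the bytes NOT copied, so a partial
	 * copy would make the ioctl "succeed" with a positive value:
	 *
	 *	return copy_to_user(arg, info, minsz);
	 */

	/* Right: map any uncopied bytes to -EFAULT. */
	if (copy_to_user(arg, info, minsz))
		return -EFAULT;
	return 0;
}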
@@ -468,7 +472,7 @@ static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
 {
 	struct vfio_fsl_mc_device *vdev = device_data;
 	struct fsl_mc_device *mc_dev = vdev->mc_dev;
-	int index;
+	unsigned int index;
 
 	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
 
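Note: the mmap path derives the region index from the user-controlled vma->vm_pgoff, an unsigned long. Storing the shifted value in a signed int lets a sufficiently large offset wrap negative, and a signed bounds check then no longer rejects it. An illustrative sketch of the hazard; the names and the shift value are assumptions, not the driver's exact check:

/* Illustrative only; DEMO_OFFSET_SHIFT and the names are assumptions. */
#define DEMO_OFFSET_SHIFT	28

static int demo_index_ok_buggy(unsigned long pgoff, int nr_regions)
{
	int index = pgoff >> DEMO_OFFSET_SHIFT;	/* can wrap negative */

	return index < nr_regions;	/* a negative index slips through */
}

static int demo_index_ok_fixed(unsigned long pgoff, unsigned int nr_regions)
{
	unsigned int index = pgoff >> DEMO_OFFSET_SHIFT;

	return index < nr_regions;	/* never negative */
}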
@@ -13,7 +13,7 @@
 #include "linux/fsl/mc.h"
 #include "vfio_fsl_mc_private.h"
 
-int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev)
+static int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev)
 {
 	struct fsl_mc_device *mc_dev = vdev->mc_dev;
 	struct vfio_fsl_mc_irq *mc_irq;
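Note: vfio_fsl_mc_irqs_allocate() is only called from within this file, so it gets internal linkage. Without the static keyword, sparse and W=1 builds warn about a missing prototype and the symbol is needlessly visible to the rest of the kernel image. Trivial illustration with a hypothetical helper name:

/* Hypothetical helper; the point is the linkage, not the body. */
static int demo_irqs_allocate(void)
{
	/* ... set up per-device IRQ bookkeeping ... */
	return 0;
}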
@@ -385,7 +385,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
 		ret = vfio_pci_igd_init(vdev);
-		if (ret) {
+		if (ret && ret != -ENODEV) {
 			pci_warn(pdev, "Failed to setup Intel IGD regions\n");
 			goto disable_exit;
 		}
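Note: with this change, vfio_pci_igd_init() failing with -ENODEV (no OpRegion to expose) no longer aborts opening the device; only other errors stay fatal. A sketch of that optional-feature pattern, with hypothetical names:

/* Hypothetical names; sketch of treating -ENODEV as "feature absent". */
#include <linux/errno.h>

struct demo_device;
int demo_feature_init(struct demo_device *d);	/* assumed helper */

static int demo_enable(struct demo_device *d)
{
	int ret = demo_feature_init(d);

	if (ret && ret != -ENODEV)
		return ret;	/* real failure: abort the open */

	return 0;		/* absent or present: continue either way */
}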
@@ -356,34 +356,60 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
 	return done;
 }
 
-static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
+					bool test_mem)
 {
-	struct vfio_pci_ioeventfd *ioeventfd = opaque;
-
 	switch (ioeventfd->count) {
 	case 1:
-		vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
+		vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
 				  ioeventfd->data, ioeventfd->addr);
 		break;
 	case 2:
-		vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
+		vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
 				   ioeventfd->data, ioeventfd->addr);
 		break;
 	case 4:
-		vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
+		vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
 				   ioeventfd->data, ioeventfd->addr);
 		break;
 #ifdef iowrite64
 	case 8:
-		vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
+		vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
 				   ioeventfd->data, ioeventfd->addr);
 		break;
 #endif
 	}
+}
+
+static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+{
+	struct vfio_pci_ioeventfd *ioeventfd = opaque;
+	struct vfio_pci_device *vdev = ioeventfd->vdev;
+
+	if (ioeventfd->test_mem) {
+		if (!down_read_trylock(&vdev->memory_lock))
+			return 1; /* Lock contended, use thread */
+		if (!__vfio_pci_memory_enabled(vdev)) {
+			up_read(&vdev->memory_lock);
+			return 0;
+		}
+	}
+
+	vfio_pci_ioeventfd_do_write(ioeventfd, false);
+
+	if (ioeventfd->test_mem)
+		up_read(&vdev->memory_lock);
 
 	return 0;
 }
 
+static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
+{
+	struct vfio_pci_ioeventfd *ioeventfd = opaque;
+
+	vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
+}
+
 long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
 			uint64_t data, int count, int fd)
 {
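Note: the ioeventfd primary handler runs from the eventfd wakeup path with a waitqueue spinlock held, so it must not sleep on memory_lock. The rework therefore only try-locks there and returns 1 to ask vfio-virqfd to fall back to the new sleepable thread handler when the lock is contended. A generic sketch of that fast-path/slow-path split (hypothetical names, not the vfio_virqfd API itself; ctx->lock is assumed to be initialized elsewhere with init_rwsem()):

/* Generic sketch of the primary/thread split under a rwsem. */
#include <linux/rwsem.h>

struct demo_ctx {
	struct rw_semaphore lock;
	bool need_lock;
};

static void demo_do_write(struct demo_ctx *ctx)
{
	/* the actual MMIO write would go here */
}

/* atomic context: must not sleep */
static int demo_primary(struct demo_ctx *ctx)
{
	if (ctx->need_lock) {
		if (!down_read_trylock(&ctx->lock))
			return 1;	/* contended: punt to the thread */
	}

	demo_do_write(ctx);

	if (ctx->need_lock)
		up_read(&ctx->lock);

	return 0;
}

/* process context: sleeping in down_read() is fine */
static void demo_thread(struct demo_ctx *ctx)
{
	if (ctx->need_lock)
		down_read(&ctx->lock);

	demo_do_write(ctx);

	if (ctx->need_lock)
		up_read(&ctx->lock);
}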
@@ -457,7 +483,8 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
 	ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
 
 	ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
-				 NULL, NULL, &ioeventfd->virqfd, fd);
+				 vfio_pci_ioeventfd_thread, NULL,
+				 &ioeventfd->virqfd, fd);
 	if (ret) {
 		kfree(ioeventfd);
 		goto out_unlock;
@@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data)
 
 		ret = pm_runtime_get_sync(vdev->device);
 		if (ret < 0)
-			goto err_pm;
+			goto err_rst;
 
 		ret = vfio_platform_call_reset(vdev, &extra_dbg);
 		if (ret && vdev->reset_required) {
@@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data)
 
 err_rst:
 	pm_runtime_put(vdev->device);
-err_pm:
 	vfio_platform_irq_cleanup(vdev);
 err_irq:
 	vfio_platform_regions_cleanup(vdev);
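Note: pm_runtime_get_sync() increments the device's usage count even when it returns an error, so bailing out without a matching put leaks the reference and can keep the device powered indefinitely. Jumping to err_rst routes the failure through pm_runtime_put(), which makes the err_pm label dead. A minimal sketch of the rule:

/* Minimal sketch of the pm_runtime refcount rule behind this fix. */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_open(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put(dev);	/* must undo the count bump */
		return ret;
	}

	/* ... use the device ... */

	pm_runtime_put(dev);
	return 0;
}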
@@ -1993,6 +1993,7 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
 
 	list_splice_tail(iova_copy, iova);
 }
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
@@ -2009,19 +2010,11 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 
 	mutex_lock(&iommu->lock);
 
-	list_for_each_entry(d, &iommu->domain_list, next) {
-		if (find_iommu_group(d, iommu_group)) {
-			mutex_unlock(&iommu->lock);
-			return -EINVAL;
-		}
-	}
-
-	if (iommu->external_domain) {
-		if (find_iommu_group(iommu->external_domain, iommu_group)) {
-			mutex_unlock(&iommu->lock);
-			return -EINVAL;
-		}
-	}
+	/* Check for duplicates */
+	if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
+		mutex_unlock(&iommu->lock);
+		return -EINVAL;
+	}
 
 	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
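Note: the open-coded duplicate checks over both the regular domain list and the external (mdev/emulated) domain collapse into the existing vfio_iommu_find_iommu_group() helper. Roughly, such a helper walks both places and returns the first matching group; the body below is an illustration reusing the file's internal types, not the exact upstream implementation:

/* Illustrative sketch only; assumes this file's internal vfio types. */
static struct vfio_group *
demo_find_iommu_group(struct vfio_iommu *iommu,
		      struct iommu_group *iommu_group)
{
	struct vfio_domain *domain;
	struct vfio_group *group;

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (group)
			return group;
	}

	if (iommu->external_domain)
		return find_iommu_group(iommu->external_domain, iommu_group);

	return NULL;
}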