vdpa: multiple address spaces support
This patch introduces multiple address spaces support for vDPA devices. The idea is to identify a specific address space via a dedicated identifier - ASID. During vDPA device allocation, the vDPA device driver needs to report the number of address spaces supported by the device; then the DMA mapping ops of the vDPA device need to be extended to support ASID. This helps to isolate the environments for virtqueues that will not be assigned directly. E.g. in the case of virtio-net, the control virtqueue will not be assigned directly to the guest. As a start, simply claim 1 virtqueue group and 1 address space for all vDPA devices. And vhost-vDPA will simply reject any device with more than 1 virtqueue group or address space. Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Gautam Dawar <gdawar@xilinx.com> Message-Id: <20220330180436.24644-7-gdawar@xilinx.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
d4821902e4
commit
db9adcbf42
|
@ -470,7 +470,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
|
eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
|
||||||
dev, &eni_vdpa_ops, 1, NULL, false);
|
dev, &eni_vdpa_ops, 1, 1, NULL, false);
|
||||||
if (IS_ERR(eni_vdpa)) {
|
if (IS_ERR(eni_vdpa)) {
|
||||||
ENI_ERR(pdev, "failed to allocate vDPA structure\n");
|
ENI_ERR(pdev, "failed to allocate vDPA structure\n");
|
||||||
return PTR_ERR(eni_vdpa);
|
return PTR_ERR(eni_vdpa);
|
||||||
|
|
|
@ -764,7 +764,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
|
||||||
pdev = ifcvf_mgmt_dev->pdev;
|
pdev = ifcvf_mgmt_dev->pdev;
|
||||||
dev = &pdev->dev;
|
dev = &pdev->dev;
|
||||||
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
|
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
|
||||||
dev, &ifc_vdpa_ops, 1, name, false);
|
dev, &ifc_vdpa_ops, 1, 1, name, false);
|
||||||
if (IS_ERR(adapter)) {
|
if (IS_ERR(adapter)) {
|
||||||
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
|
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
|
||||||
return PTR_ERR(adapter);
|
return PTR_ERR(adapter);
|
||||||
|
|
|
@ -2409,7 +2409,8 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
|
||||||
return mvdev->generation;
|
return mvdev->generation;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
|
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
|
||||||
|
struct vhost_iotlb *iotlb)
|
||||||
{
|
{
|
||||||
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
|
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
|
@ -2823,7 +2824,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
|
||||||
}
|
}
|
||||||
|
|
||||||
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
|
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
|
||||||
1, name, false);
|
1, 1, name, false);
|
||||||
if (IS_ERR(ndev))
|
if (IS_ERR(ndev))
|
||||||
return PTR_ERR(ndev);
|
return PTR_ERR(ndev);
|
||||||
|
|
||||||
|
|
|
@ -159,6 +159,7 @@ static void vdpa_release_dev(struct device *d)
|
||||||
* @parent: the parent device
|
* @parent: the parent device
|
||||||
* @config: the bus operations that is supported by this device
|
* @config: the bus operations that is supported by this device
|
||||||
* @ngroups: number of groups supported by this device
|
* @ngroups: number of groups supported by this device
|
||||||
|
* @nas: number of address spaces supported by this device
|
||||||
* @size: size of the parent structure that contains private data
|
* @size: size of the parent structure that contains private data
|
||||||
* @name: name of the vdpa device; optional.
|
* @name: name of the vdpa device; optional.
|
||||||
* @use_va: indicate whether virtual address must be used by this device
|
* @use_va: indicate whether virtual address must be used by this device
|
||||||
|
@ -171,7 +172,7 @@ static void vdpa_release_dev(struct device *d)
|
||||||
*/
|
*/
|
||||||
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
||||||
const struct vdpa_config_ops *config,
|
const struct vdpa_config_ops *config,
|
||||||
unsigned int ngroups,
|
unsigned int ngroups, unsigned int nas,
|
||||||
size_t size, const char *name,
|
size_t size, const char *name,
|
||||||
bool use_va)
|
bool use_va)
|
||||||
{
|
{
|
||||||
|
@ -205,6 +206,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
||||||
vdev->features_valid = false;
|
vdev->features_valid = false;
|
||||||
vdev->use_va = use_va;
|
vdev->use_va = use_va;
|
||||||
vdev->ngroups = ngroups;
|
vdev->ngroups = ngroups;
|
||||||
|
vdev->nas = nas;
|
||||||
|
|
||||||
if (name)
|
if (name)
|
||||||
err = dev_set_name(&vdev->dev, "%s", name);
|
err = dev_set_name(&vdev->dev, "%s", name);
|
||||||
|
|
|
@ -251,7 +251,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
|
||||||
ops = &vdpasim_config_ops;
|
ops = &vdpasim_config_ops;
|
||||||
|
|
||||||
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 1,
|
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 1,
|
||||||
dev_attr->name, false);
|
1, dev_attr->name, false);
|
||||||
if (IS_ERR(vdpasim)) {
|
if (IS_ERR(vdpasim)) {
|
||||||
ret = PTR_ERR(vdpasim);
|
ret = PTR_ERR(vdpasim);
|
||||||
goto err_alloc;
|
goto err_alloc;
|
||||||
|
@ -539,7 +539,7 @@ static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
|
||||||
return range;
|
return range;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int vdpasim_set_map(struct vdpa_device *vdpa,
|
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
|
||||||
struct vhost_iotlb *iotlb)
|
struct vhost_iotlb *iotlb)
|
||||||
{
|
{
|
||||||
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
|
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
|
||||||
|
@ -566,7 +566,8 @@ err:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
|
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
|
||||||
|
u64 iova, u64 size,
|
||||||
u64 pa, u32 perm, void *opaque)
|
u64 pa, u32 perm, void *opaque)
|
||||||
{
|
{
|
||||||
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
|
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
|
||||||
|
@ -580,7 +581,8 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
|
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
|
||||||
|
u64 iova, u64 size)
|
||||||
{
|
{
|
||||||
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
|
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
|
||||||
|
|
||||||
|
|
|
@ -693,6 +693,7 @@ static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
|
static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
|
||||||
|
unsigned int asid,
|
||||||
struct vhost_iotlb *iotlb)
|
struct vhost_iotlb *iotlb)
|
||||||
{
|
{
|
||||||
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
|
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
|
||||||
|
@ -1495,7 +1496,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
|
||||||
return -EEXIST;
|
return -EEXIST;
|
||||||
|
|
||||||
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
|
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
|
||||||
&vduse_vdpa_config_ops, 1, name, true);
|
&vduse_vdpa_config_ops, 1, 1, name, true);
|
||||||
if (IS_ERR(vdev))
|
if (IS_ERR(vdev))
|
||||||
return PTR_ERR(vdev);
|
return PTR_ERR(vdev);
|
||||||
|
|
||||||
|
|
|
@ -466,7 +466,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
|
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
|
||||||
dev, &vp_vdpa_ops, 1, NULL, false);
|
dev, &vp_vdpa_ops, 1, 1, NULL, false);
|
||||||
if (IS_ERR(vp_vdpa)) {
|
if (IS_ERR(vp_vdpa)) {
|
||||||
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
|
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
|
||||||
return PTR_ERR(vp_vdpa);
|
return PTR_ERR(vp_vdpa);
|
||||||
|
|
|
@ -633,10 +633,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
if (ops->dma_map) {
|
if (ops->dma_map) {
|
||||||
r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
|
r = ops->dma_map(vdpa, 0, iova, size, pa, perm, opaque);
|
||||||
} else if (ops->set_map) {
|
} else if (ops->set_map) {
|
||||||
if (!v->in_batch)
|
if (!v->in_batch)
|
||||||
r = ops->set_map(vdpa, iotlb);
|
r = ops->set_map(vdpa, 0, iotlb);
|
||||||
} else {
|
} else {
|
||||||
r = iommu_map(v->domain, iova, pa, size,
|
r = iommu_map(v->domain, iova, pa, size,
|
||||||
perm_to_iommu_flags(perm));
|
perm_to_iommu_flags(perm));
|
||||||
|
@ -662,10 +662,10 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
|
||||||
vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
|
vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
|
||||||
|
|
||||||
if (ops->dma_map) {
|
if (ops->dma_map) {
|
||||||
ops->dma_unmap(vdpa, iova, size);
|
ops->dma_unmap(vdpa, 0, iova, size);
|
||||||
} else if (ops->set_map) {
|
} else if (ops->set_map) {
|
||||||
if (!v->in_batch)
|
if (!v->in_batch)
|
||||||
ops->set_map(vdpa, iotlb);
|
ops->set_map(vdpa, 0, iotlb);
|
||||||
} else {
|
} else {
|
||||||
iommu_unmap(v->domain, iova, size);
|
iommu_unmap(v->domain, iova, size);
|
||||||
}
|
}
|
||||||
|
@ -897,7 +897,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
|
||||||
break;
|
break;
|
||||||
case VHOST_IOTLB_BATCH_END:
|
case VHOST_IOTLB_BATCH_END:
|
||||||
if (v->in_batch && ops->set_map)
|
if (v->in_batch && ops->set_map)
|
||||||
ops->set_map(vdpa, iotlb);
|
ops->set_map(vdpa, 0, iotlb);
|
||||||
v->in_batch = false;
|
v->in_batch = false;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
@ -1163,6 +1163,10 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
|
||||||
int minor;
|
int minor;
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
|
/* Only support 1 address space and 1 groups */
|
||||||
|
if (vdpa->ngroups != 1 || vdpa->nas != 1)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
|
v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
|
||||||
if (!v)
|
if (!v)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
|
@ -69,6 +69,8 @@ struct vdpa_mgmt_dev;
|
||||||
* @cf_lock: Protects get and set access to configuration layout.
|
* @cf_lock: Protects get and set access to configuration layout.
|
||||||
* @index: device index
|
* @index: device index
|
||||||
* @features_valid: were features initialized? for legacy guests
|
* @features_valid: were features initialized? for legacy guests
|
||||||
|
* @ngroups: the number of virtqueue groups
|
||||||
|
* @nas: the number of address spaces
|
||||||
* @use_va: indicate whether virtual address must be used by this device
|
* @use_va: indicate whether virtual address must be used by this device
|
||||||
* @nvqs: maximum number of supported virtqueues
|
* @nvqs: maximum number of supported virtqueues
|
||||||
* @mdev: management device pointer; caller must setup when registering device as part
|
* @mdev: management device pointer; caller must setup when registering device as part
|
||||||
|
@ -86,6 +88,7 @@ struct vdpa_device {
|
||||||
u32 nvqs;
|
u32 nvqs;
|
||||||
struct vdpa_mgmt_dev *mdev;
|
struct vdpa_mgmt_dev *mdev;
|
||||||
unsigned int ngroups;
|
unsigned int ngroups;
|
||||||
|
unsigned int nas;
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -241,6 +244,7 @@ struct vdpa_map_file {
|
||||||
* Needed for device that using device
|
* Needed for device that using device
|
||||||
* specific DMA translation (on-chip IOMMU)
|
* specific DMA translation (on-chip IOMMU)
|
||||||
* @vdev: vdpa device
|
* @vdev: vdpa device
|
||||||
|
* @asid: address space identifier
|
||||||
* @iotlb: vhost memory mapping to be
|
* @iotlb: vhost memory mapping to be
|
||||||
* used by the vDPA
|
* used by the vDPA
|
||||||
* Returns integer: success (0) or error (< 0)
|
* Returns integer: success (0) or error (< 0)
|
||||||
|
@ -249,6 +253,7 @@ struct vdpa_map_file {
|
||||||
* specific DMA translation (on-chip IOMMU)
|
* specific DMA translation (on-chip IOMMU)
|
||||||
* and preferring incremental map.
|
* and preferring incremental map.
|
||||||
* @vdev: vdpa device
|
* @vdev: vdpa device
|
||||||
|
* @asid: address space identifier
|
||||||
* @iova: iova to be mapped
|
* @iova: iova to be mapped
|
||||||
* @size: size of the area
|
* @size: size of the area
|
||||||
* @pa: physical address for the map
|
* @pa: physical address for the map
|
||||||
|
@ -260,6 +265,7 @@ struct vdpa_map_file {
|
||||||
* specific DMA translation (on-chip IOMMU)
|
* specific DMA translation (on-chip IOMMU)
|
||||||
* and preferring incremental unmap.
|
* and preferring incremental unmap.
|
||||||
* @vdev: vdpa device
|
* @vdev: vdpa device
|
||||||
|
* @asid: address space identifier
|
||||||
* @iova: iova to be unmapped
|
* @iova: iova to be unmapped
|
||||||
* @size: size of the area
|
* @size: size of the area
|
||||||
* Returns integer: success (0) or error (< 0)
|
* Returns integer: success (0) or error (< 0)
|
||||||
|
@ -313,10 +319,12 @@ struct vdpa_config_ops {
|
||||||
struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
|
struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
|
||||||
|
|
||||||
/* DMA ops */
|
/* DMA ops */
|
||||||
int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
|
int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
|
||||||
int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
|
struct vhost_iotlb *iotlb);
|
||||||
u64 pa, u32 perm, void *opaque);
|
int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
|
||||||
int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
|
u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
|
||||||
|
int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
|
||||||
|
u64 iova, u64 size);
|
||||||
|
|
||||||
/* Free device resources */
|
/* Free device resources */
|
||||||
void (*free)(struct vdpa_device *vdev);
|
void (*free)(struct vdpa_device *vdev);
|
||||||
|
@ -324,7 +332,7 @@ struct vdpa_config_ops {
|
||||||
|
|
||||||
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
||||||
const struct vdpa_config_ops *config,
|
const struct vdpa_config_ops *config,
|
||||||
unsigned int ngroups,
|
unsigned int ngroups, unsigned int nas,
|
||||||
size_t size, const char *name,
|
size_t size, const char *name,
|
||||||
bool use_va);
|
bool use_va);
|
||||||
|
|
||||||
|
@ -336,17 +344,19 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
||||||
* @parent: the parent device
|
* @parent: the parent device
|
||||||
* @config: the bus operations that is supported by this device
|
* @config: the bus operations that is supported by this device
|
||||||
* @ngroups: the number of virtqueue groups supported by this device
|
* @ngroups: the number of virtqueue groups supported by this device
|
||||||
|
* @nas: the number of address spaces
|
||||||
* @name: name of the vdpa device
|
* @name: name of the vdpa device
|
||||||
* @use_va: indicate whether virtual address must be used by this device
|
* @use_va: indicate whether virtual address must be used by this device
|
||||||
*
|
*
|
||||||
* Return allocated data structure or ERR_PTR upon error
|
* Return allocated data structure or ERR_PTR upon error
|
||||||
*/
|
*/
|
||||||
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, name, use_va) \
|
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
|
||||||
|
name, use_va) \
|
||||||
container_of((__vdpa_alloc_device( \
|
container_of((__vdpa_alloc_device( \
|
||||||
parent, config, ngroups, \
|
parent, config, ngroups, nas, \
|
||||||
sizeof(dev_struct) + \
|
(sizeof(dev_struct) + \
|
||||||
BUILD_BUG_ON_ZERO(offsetof( \
|
BUILD_BUG_ON_ZERO(offsetof( \
|
||||||
dev_struct, member)), name, use_va)), \
|
dev_struct, member))), name, use_va)), \
|
||||||
dev_struct, member)
|
dev_struct, member)
|
||||||
|
|
||||||
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
|
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
|
||||||
|
|
Loading…
Reference in New Issue