commit 3bf6a9e36e

virtio,vdpa,qemu_fw_cfg: features, cleanups, fixes

partial support for < MAX_ORDER - 1 granularity for virtio-mem
driver_override for vdpa
sysfs ABI documentation for vdpa
multiqueue config support for mlx5 vdpa
Misc fixes, cleanups.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmHiDHkPHG1zdEByZWRo
YXQuY29tAAoJECgfDbjSjVRpVT4H/3Veixt3uYPOmuLU2tSx+8X+sFTtik81hyiE
okz5fRJrxxA8SqS76FnmO10FS4hlPOGNk0Z5WVhr0yihwFvPLvpCM/xi2Lmrz9I7
pB0sXOIocEL1xApsxukR9K1Twpb2hfYsflbJYUVlRfhS5G0izKJNZp5I7OPrzd80
vVNNDWKW2iLDlfqsavumI4Kvm4nsFuCHG03jzMtcIa7YTXYV3DORD4ZGFFVUOIQN
t5F74TznwHOeYgJeg7TzjFjfPWmXjLetvx10QX1A1uOvwppWW/QY6My0UafTXNXj
VB3gOwJPf+gxXAXl/4bafq4NzM0xys6cpcPpjvhmU+erY4UuyAU=
=Y1eO
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "virtio,vdpa,qemu_fw_cfg: features, cleanups, and fixes.

  - partial support for < MAX_ORDER - 1 granularity for virtio-mem

  - driver_override for vdpa

  - sysfs ABI documentation for vdpa

  - multiqueue config support for mlx5 vdpa

  - and misc fixes, cleanups"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (42 commits)
  vdpa/mlx5: Fix tracking of current number of VQs
  vdpa/mlx5: Fix is_index_valid() to refer to features
  vdpa: Protect vdpa reset with cf_mutex
  vdpa: Avoid taking cf_mutex lock on get status
  vdpa/vdpa_sim_net: Report max device capabilities
  vdpa: Use BIT_ULL for bit operations
  vdpa/vdpa_sim: Configure max supported virtqueues
  vdpa/mlx5: Report max device capabilities
  vdpa: Support reporting max device capabilities
  vdpa/mlx5: Restore cur_num_vqs in case of failure in change_num_qps()
  vdpa: Add support for returning device configuration information
  vdpa/mlx5: Support configuring max data virtqueue
  vdpa/mlx5: Fix config_attr_mask assignment
  vdpa: Allow to configure max data virtqueues
  vdpa: Read device configuration only if FEATURES_OK
  vdpa: Sync calls set/get config/status with cf_mutex
  vdpa/mlx5: Distribute RX virtqueues in RQT object
  vdpa: Provide interface to read driver features
  vdpa: clean up get_config_size ret value handling
  virtio_ring: mark ring unused on error
  ...
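As a quick illustration of the multiqueue config support listed above: the new VDPA_ATTR_DEV_NET_CFG_MAX_VQP netlink attribute is driven from userspace by the iproute2 vdpa tool. A sketch, assuming a recent iproute2 with max_vqp support and a hypothetical mlx5 management device name:

    vdpa mgmtdev show                  # lists max_supported_vqs and supported_features
    vdpa dev add name vdpa0 mgmtdev auxiliary/mlx5_core.sf.1 max_vqp 4
    vdpa dev config show vdpa0         # VDPA_CMD_DEV_CONFIG_GET; valid only after FEATURES_OK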
@@ -0,0 +1,57 @@
+What:		/sys/bus/vdpa/driver_autoprobe
+Date:		March 2020
+Contact:	virtualization@lists.linux-foundation.org
+Description:
+		This file determines whether new devices are immediately bound
+		to a driver after creation. It initially contains 1, which
+		means the kernel automatically binds devices to a compatible
+		driver immediately after they are created.
+
+		Writing "0" to this file disables this feature; any other
+		string enables it.
+
+What:		/sys/bus/vdpa/driver_probe
+Date:		March 2020
+Contact:	virtualization@lists.linux-foundation.org
+Description:
+		Writing a device name to this file will cause the kernel to
+		bind the device to a compatible driver.
+
+		This can be useful when /sys/bus/vdpa/driver_autoprobe is
+		disabled.
+
+What:		/sys/bus/vdpa/drivers/.../bind
+Date:		March 2020
+Contact:	virtualization@lists.linux-foundation.org
+Description:
+		Writing a device name to this file will cause the driver to
+		attempt to bind to the device. This is useful for overriding
+		default bindings.
+
+What:		/sys/bus/vdpa/drivers/.../unbind
+Date:		March 2020
+Contact:	virtualization@lists.linux-foundation.org
+Description:
+		Writing a device name to this file will cause the driver to
+		attempt to unbind from the device. This may be useful when
+		overriding default bindings.
+
+What:		/sys/bus/vdpa/devices/.../driver_override
+Date:		November 2021
+Contact:	virtualization@lists.linux-foundation.org
+Description:
+		This file allows the driver for a device to be specified.
+		When specified, only a driver with a name matching the value
+		written to driver_override will have an opportunity to bind to
+		the device. The override is specified by writing a string to the
+		driver_override file (echo vhost-vdpa > driver_override) and may
+		be cleared with an empty string (echo > driver_override).
+		This returns the device to the standard matching rules.
+		Writing to driver_override does not automatically unbind the
+		device from its current driver or make any attempt to
+		automatically load the specified driver. If no driver with a
+		matching name is currently loaded in the kernel, the device will
+		not bind to any driver. This also allows devices to opt-out of
+		driver binding using a driver_override name such as "none".
+		Only a single driver may be specified in the override; there is
+		no support for parsing delimiters.
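
In combination with the bind/unbind files documented above, the override allows steering a device to a specific bus driver. A minimal sketch — the device name vdpa0 is an assumption, and the in-kernel bus drivers are assumed to register as virtio_vdpa and vhost_vdpa:

    echo vhost_vdpa > /sys/bus/vdpa/devices/vdpa0/driver_override
    echo vdpa0 > /sys/bus/vdpa/drivers/virtio_vdpa/unbind
    echo vdpa0 > /sys/bus/vdpa/drivers/vhost_vdpa/bind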
@@ -20377,6 +20377,7 @@ M:	"Michael S. Tsirkin" <mst@redhat.com>
 M:	Jason Wang <jasowang@redhat.com>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
+F:	Documentation/ABI/testing/sysfs-bus-vdpa
 F:	Documentation/devicetree/bindings/virtio/
 F:	drivers/block/virtio_blk.c
 F:	drivers/crypto/virtio/

@@ -616,7 +616,7 @@ static void um_pci_virtio_remove(struct virtio_device *vdev)
 	int i;
 
 	/* Stop all virtqueues */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 
 	device_set_wakeup_enable(&vdev->dev, false);

@@ -976,7 +976,7 @@ static void virtblk_remove(struct virtio_device *vdev)
 	mutex_lock(&vblk->vdev_mutex);
 
 	/* Stop all the virtqueues. */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
 	vblk->vdev = NULL;
@@ -995,7 +995,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
 	struct virtio_blk *vblk = vdev->priv;
 
 	/* Ensure we don't receive any more interrupts */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	/* Make sure no work handler is accessing the device. */
 	flush_work(&vblk->config_work);

@@ -367,7 +367,7 @@ static void virtbt_remove(struct virtio_device *vdev)
 	struct hci_dev *hdev = vbt->hdev;
 
 	hci_unregister_dev(hdev);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	hci_free_dev(hdev);
 	vbt->hdev = NULL;

@@ -179,9 +179,9 @@ static void remove_common(struct virtio_device *vdev)
 	vi->data_avail = 0;
 	vi->data_idx = 0;
 	complete(&vi->have_data);
-	vdev->config->reset(vdev);
 	if (vi->hwrng_register_done)
 		hwrng_unregister(&vi->hwrng);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 	ida_simple_remove(&rng_index_ida, vi->index);
 	kfree(vi);

@@ -1958,7 +1958,7 @@ static void virtcons_remove(struct virtio_device *vdev)
 	spin_unlock_irq(&pdrvdata_lock);
 
 	/* Disable interrupts for vqs */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	/* Finish up work that's lined up */
 	if (use_multiport(portdev))
 		cancel_work_sync(&portdev->control_work);
@@ -2148,7 +2148,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
 	portdev = vdev->priv;
 
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	if (use_multiport(portdev))
 		virtqueue_disable_cb(portdev->c_ivq);

@@ -404,7 +404,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
 free_engines:
 	virtcrypto_clear_crypto_engines(vcrypto);
 free_vqs:
-	vcrypto->vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	virtcrypto_del_vqs(vcrypto);
 free_dev:
 	virtcrypto_devmgr_rm_dev(vcrypto);
@@ -436,7 +436,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
 
 	if (virtcrypto_dev_started(vcrypto))
 		virtcrypto_dev_stop(vcrypto);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	virtcrypto_free_unused_reqs(vcrypto);
 	virtcrypto_clear_crypto_engines(vcrypto);
 	virtcrypto_del_vqs(vcrypto);
@@ -456,7 +456,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
 {
 	struct virtio_crypto *vcrypto = vdev->priv;
 
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	virtcrypto_free_unused_reqs(vcrypto);
 	if (virtcrypto_dev_started(vcrypto))
 		virtcrypto_dev_stop(vcrypto);
@@ -492,7 +492,7 @@ static int virtcrypto_restore(struct virtio_device *vdev)
 free_engines:
 	virtcrypto_clear_crypto_engines(vcrypto);
 free_vqs:
-	vcrypto->vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	virtcrypto_del_vqs(vcrypto);
 	return err;
 }

@@ -452,7 +452,7 @@ static void scmi_vio_remove(struct virtio_device *vdev)
 	 * outstanding message on any vqueue to be ignored by complete_cb: now
 	 * we can just stop processing buffers and destroy the vqueues.
 	 */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 	/* Ensure scmi_vdev is visible as NULL */
 	smp_store_mb(scmi_vdev, NULL);

@@ -450,7 +450,7 @@ static void virtio_gpio_request_vq(struct virtqueue *vq)
 
 static void virtio_gpio_free_vqs(struct virtio_device *vdev)
 {
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 }
 

@@ -279,7 +279,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
 	flush_work(&vgdev->ctrlq.dequeue_work);
 	flush_work(&vgdev->cursorq.dequeue_work);
 	flush_work(&vgdev->config_changed_work);
-	vgdev->vdev->config->reset(vgdev->vdev);
+	virtio_reset_device(vgdev->vdev);
 	vgdev->vdev->config->del_vqs(vgdev->vdev);
 }
 

@@ -165,7 +165,7 @@ err_free:
 
 static void virtio_i2c_del_vqs(struct virtio_device *vdev)
 {
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 }
 

@@ -1189,7 +1189,7 @@ static void viommu_remove(struct virtio_device *vdev)
 	iommu_device_unregister(&viommu->iommu);
 
 	/* Stop all virtqueues */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 
 	dev_info(&vdev->dev, "device removed\n");

@@ -754,7 +754,7 @@ static void cfv_remove(struct virtio_device *vdev)
 	debugfs_remove_recursive(cfv->debugfs);
 
 	vringh_kiov_cleanup(&cfv->ctx.riov);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->vringh_config->del_vrhs(cfv->vdev);
 	cfv->vr_rx = NULL;
 	vdev->config->del_vqs(cfv->vdev);

@@ -3312,7 +3312,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	return 0;
 
 free_unregister_netdev:
-	vi->vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	unregister_netdev(dev);
 free_failover:
@@ -3328,7 +3328,7 @@ free:
 
 static void remove_vq_common(struct virtnet_info *vi)
 {
-	vi->vdev->config->reset(vi->vdev);
+	virtio_reset_device(vi->vdev);
 
 	/* Free unused buffers in both send and recv, if any. */
 	free_unused_bufs(vi);

@@ -4498,7 +4498,7 @@ static void remove_vqs(struct virtio_device *vdev)
 {
 	int i;
 
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) {
 		struct virtqueue *vq = hwsim_vqs[i];

@@ -105,7 +105,7 @@ static void virtio_pmem_remove(struct virtio_device *vdev)
 
 	nvdimm_bus_unregister(nvdimm_bus);
 	vdev->config->del_vqs(vdev);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 }
 
 static struct virtio_driver virtio_pmem_driver = {

@@ -1024,7 +1024,7 @@ static void rpmsg_remove(struct virtio_device *vdev)
 	size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
 	int ret;
 
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
 	if (ret)

@@ -778,7 +778,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
 static void virtscsi_remove_vqs(struct virtio_device *vdev)
 {
 	/* Stop all the virtqueues. */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 }
 

@@ -58,7 +58,7 @@ static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
 	return &eni_vdpa->ldev;
 }
 
-static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
 {
 	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
 	u64 features = vp_legacy_get_features(ldev);
@@ -69,7 +69,7 @@ static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
 	return features;
 }
 
-static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
 {
 	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
 
@@ -84,6 +84,13 @@ static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
 	return 0;
 }
 
+static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+	return vp_legacy_get_driver_features(ldev);
+}
+
 static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
 {
 	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
@@ -401,8 +408,9 @@ static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
 }
 
 static const struct vdpa_config_ops eni_vdpa_ops = {
-	.get_features = eni_vdpa_get_features,
-	.set_features = eni_vdpa_set_features,
+	.get_device_features = eni_vdpa_get_device_features,
+	.set_driver_features = eni_vdpa_set_driver_features,
+	.get_driver_features = eni_vdpa_get_driver_features,
 	.get_status = eni_vdpa_get_status,
 	.set_status = eni_vdpa_set_status,
 	.reset = eni_vdpa_reset,
@@ -450,11 +458,6 @@ static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
 	return num;
 }
 
-static void eni_vdpa_free_irq_vectors(void *data)
-{
-	pci_free_irq_vectors(data);
-}
-
 static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
@@ -488,13 +491,6 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	eni_vdpa->vdpa.dma_dev = &pdev->dev;
 	eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
 
-	ret = devm_add_action_or_reset(dev, eni_vdpa_free_irq_vectors, pdev);
-	if (ret) {
-		ENI_ERR(pdev,
-			"failed for adding devres for freeing irq vectors\n");
-		goto err;
-	}
-
 	eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
 				       sizeof(*eni_vdpa->vring),
 				       GFP_KERNEL);

@@ -143,8 +143,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
 		IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
 		break;
 	case VIRTIO_PCI_CAP_DEVICE_CFG:
-		hw->net_cfg = get_cap_addr(hw, &cap);
-		IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
+		hw->dev_cfg = get_cap_addr(hw, &cap);
+		IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
 		break;
 	}
 
@@ -153,7 +153,7 @@ next:
 	}
 
 	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
-	    hw->isr == NULL || hw->net_cfg == NULL) {
+	    hw->isr == NULL || hw->dev_cfg == NULL) {
 		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
 		return -EIO;
 	}
@@ -174,7 +174,7 @@ next:
 	IFCVF_DBG(pdev,
 		  "PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n",
 		  hw->common_cfg, hw->notify_base, hw->isr,
-		  hw->net_cfg, hw->notify_off_multiplier);
+		  hw->dev_cfg, hw->notify_off_multiplier);
 
 	return 0;
 }
@@ -242,33 +242,54 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
 	return 0;
 }
 
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+{
+	struct ifcvf_adapter *adapter;
+	u32 config_size;
+
+	adapter = vf_to_adapter(hw);
+	switch (hw->dev_type) {
+	case VIRTIO_ID_NET:
+		config_size = sizeof(struct virtio_net_config);
+		break;
+	case VIRTIO_ID_BLOCK:
+		config_size = sizeof(struct virtio_blk_config);
+		break;
+	default:
+		config_size = 0;
+		IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
+	}
+
+	return config_size;
+}
+
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
 			   void *dst, int length)
 {
 	u8 old_gen, new_gen, *p;
 	int i;
 
-	WARN_ON(offset + length > sizeof(struct virtio_net_config));
+	WARN_ON(offset + length > hw->config_size);
 	do {
 		old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
 		p = dst;
 		for (i = 0; i < length; i++)
-			*p++ = ifc_ioread8(hw->net_cfg + offset + i);
+			*p++ = ifc_ioread8(hw->dev_cfg + offset + i);
 
 		new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
 	} while (old_gen != new_gen);
 }
 
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
 			    const void *src, int length)
 {
 	const u8 *p;
 	int i;
 
 	p = src;
-	WARN_ON(offset + length > sizeof(struct virtio_net_config));
+	WARN_ON(offset + length > hw->config_size);
 	for (i = 0; i < length; i++)
-		ifc_iowrite8(*p++, hw->net_cfg + offset + i);
+		ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
 }
 
 static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)

@@ -71,12 +71,14 @@ struct ifcvf_hw {
 	u64 hw_features;
 	u32 dev_type;
 	struct virtio_pci_common_cfg __iomem *common_cfg;
-	void __iomem *net_cfg;
+	void __iomem *dev_cfg;
 	struct vring_info vring[IFCVF_MAX_QUEUES];
 	void __iomem * const *base;
 	char config_msix_name[256];
 	struct vdpa_callback config_cb;
 	unsigned int config_irq;
+	/* virtio-net or virtio-blk device config size */
+	u32 config_size;
 };
 
 struct ifcvf_adapter {
@@ -105,9 +107,9 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
 int ifcvf_start_hw(struct ifcvf_hw *hw);
 void ifcvf_stop_hw(struct ifcvf_hw *hw);
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
 			   void *dst, int length);
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
 			    const void *src, int length);
 u8 ifcvf_get_status(struct ifcvf_hw *hw);
 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
@@ -120,4 +122,5 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
 int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
 #endif /* _IFCVF_H_ */

@@ -169,7 +169,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
 	return &adapter->vf;
 }
 
-static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
+static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
 {
 	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -187,7 +187,7 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
 	return features;
 }
 
-static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
+static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 	int ret;
@@ -201,6 +201,13 @@ static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
 	return 0;
 }
 
+static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
+{
+	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+	return vf->req_features;
+}
+
 static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -366,24 +373,9 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
 
 static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
 {
-	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
-	struct pci_dev *pdev = adapter->pdev;
-	size_t size;
-
-	switch (vf->dev_type) {
-	case VIRTIO_ID_NET:
-		size = sizeof(struct virtio_net_config);
-		break;
-	case VIRTIO_ID_BLOCK:
-		size = sizeof(struct virtio_blk_config);
-		break;
-	default:
-		size = 0;
-		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
-	}
-
-	return size;
+
+	return vf->config_size;
 }
 
 static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
@@ -392,8 +384,7 @@ static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	WARN_ON(offset + len > sizeof(struct virtio_net_config));
-	ifcvf_read_net_config(vf, offset, buf, len);
+	ifcvf_read_dev_config(vf, offset, buf, len);
 }
 
 static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
@@ -402,8 +393,7 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	WARN_ON(offset + len > sizeof(struct virtio_net_config));
-	ifcvf_write_net_config(vf, offset, buf, len);
+	ifcvf_write_dev_config(vf, offset, buf, len);
 }
 
 static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
@@ -443,8 +433,9 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
  * implemented set_map()/dma_map()/dma_unmap()
  */
 static const struct vdpa_config_ops ifc_vdpa_ops = {
-	.get_features = ifcvf_vdpa_get_features,
-	.set_features = ifcvf_vdpa_set_features,
+	.get_device_features = ifcvf_vdpa_get_device_features,
+	.set_driver_features = ifcvf_vdpa_set_driver_features,
+	.get_driver_features = ifcvf_vdpa_get_driver_features,
 	.get_status = ifcvf_vdpa_get_status,
 	.set_status = ifcvf_vdpa_set_status,
 	.reset = ifcvf_vdpa_reset,
@@ -542,6 +533,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 		vf->vring[i].irq = -EINVAL;
 
 	vf->hw_features = ifcvf_get_hw_features(vf);
+	vf->config_size = ifcvf_get_config_size(vf);
 
 	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
 	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);

@@ -131,25 +131,24 @@ struct mlx5_vdpa_virtqueue {
 	struct mlx5_vq_restore_info ri;
 };
 
-/* We will remove this limitation once mlx5_vdpa_alloc_resources()
- * provides for driver space allocation
- */
-#define MLX5_MAX_SUPPORTED_VQS 16
-
 static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
 {
-	if (unlikely(idx > mvdev->max_idx))
-		return false;
+	if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
+		if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+			return idx < 2;
+		else
+			return idx < 3;
+	}
 
-	return true;
+	return idx <= mvdev->max_idx;
 }
 
 struct mlx5_vdpa_net {
 	struct mlx5_vdpa_dev mvdev;
 	struct mlx5_vdpa_net_resources res;
 	struct virtio_net_config config;
-	struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
-	struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
+	struct mlx5_vdpa_virtqueue *vqs;
+	struct vdpa_callback *event_cbs;
 
 	/* Serialize vq resources creation and destruction. This is required
 	 * since memory map might change and we need to destroy and create
@@ -876,8 +875,6 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
 	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
 	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
-	if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
-		MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
 
 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
 	if (err)
@@ -1218,7 +1215,7 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
 {
 	int i;
 
-	for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+	for (i = 0; i < ndev->mvdev.max_vqs; i++)
 		suspend_vq(ndev, &ndev->vqs[i]);
 }
 
@@ -1244,8 +1241,14 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
 	void *in;
 	int i, j;
 	int err;
+	int num;
 
-	max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+		num = 1;
+	else
+		num = ndev->cur_num_vqs / 2;
+
+	max_rqt = min_t(int, roundup_pow_of_two(num),
 			1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
 	if (max_rqt < 1)
 		return -EOPNOTSUPP;
@@ -1261,17 +1264,10 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
 	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
 	MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
 	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
-	for (i = 0, j = 0; j < max_rqt; j++) {
-		if (!ndev->vqs[j].initialized)
-			continue;
-
-		if (!vq_is_tx(ndev->vqs[j].index)) {
-			list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
-			i++;
-		}
-	}
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+	for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+		list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
 
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
 	err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
 	kfree(in);
 	if (err)
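
A worked example of the new RQT fill loop above (illustrative values): with VIRTIO_NET_F_MQ negotiated and cur_num_vqs = 6, num = 3 and max_rqt = roundup_pow_of_two(3) = 4. The loop then visits j = 0, 2, 4, 6, and j % (2 * num) = 0, 2, 4, 0, so the table is filled with the even-indexed (receive) virtqueues, wrapping around until the power-of-two-sized RQT is full.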
@@ -1292,7 +1288,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
 	int i, j;
 	int err;
 
-	max_rqt = min_t(int, ndev->cur_num_vqs / 2,
+	max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
 			1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
 	if (max_rqt < 1)
 		return -EOPNOTSUPP;
@@ -1308,16 +1304,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
 	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
 
 	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
-	for (i = 0, j = 0; j < num; j++) {
-		if (!ndev->vqs[j].initialized)
-			continue;
+	for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+		list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
 
-		if (!vq_is_tx(ndev->vqs[j].index)) {
-			list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
-			i++;
-		}
-	}
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
 	err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
 	kfree(in);
 	if (err)
@@ -1554,9 +1544,11 @@ static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
 	return 0;
 
 clean_added:
-	for (--i; i >= cur_qps; --i)
+	for (--i; i >= 2 * cur_qps; --i)
 		teardown_vq(ndev, &ndev->vqs[i]);
 
+	ndev->cur_num_vqs = 2 * cur_qps;
+
 	return err;
 }
 
@@ -1581,9 +1573,6 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 			break;
 		}
 
-		if (newqps & (newqps - 1))
-			break;
-
 		if (!change_num_qps(mvdev, newqps))
 			status = VIRTIO_NET_OK;
 
@@ -1880,21 +1869,29 @@ static u64 mlx_to_vritio_features(u16 dev_features)
 	return result;
 }
 
-static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
+static u64 get_supported_features(struct mlx5_core_dev *mdev)
 {
+	u64 mlx_vdpa_features = 0;
+	u16 dev_features;
+
+	dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mdev, device_features_bits_mask);
+	mlx_vdpa_features |= mlx_to_vritio_features(dev_features);
+	if (MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_version_1_0))
+		mlx_vdpa_features |= BIT_ULL(VIRTIO_F_VERSION_1);
+	mlx_vdpa_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
+	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
+	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
+	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
+
+	return mlx_vdpa_features;
+}
+
+static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
+{
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
-	u16 dev_features;
-
-	dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
-	ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features);
-	if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
-		ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
-	ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
-	ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
-	ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
-	ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
-	ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
 
 	print_features(mvdev, ndev->mvdev.mlx_features, false);
 	return ndev->mvdev.mlx_features;
@@ -1972,7 +1969,7 @@ static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
 	}
 }
 
-static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
 {
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -1985,6 +1982,11 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
 		return err;
 
 	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
+		ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+	else
+		ndev->cur_num_vqs = 2;
+
 	update_cvq_info(mvdev);
 	return err;
 }
@@ -2235,7 +2237,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 	clear_vqs_ready(ndev);
 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
 	ndev->mvdev.status = 0;
-	memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+	ndev->cur_num_vqs = 0;
+	memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
 	ndev->mvdev.actual_features = 0;
 	++mvdev->generation;
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
@@ -2308,6 +2311,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
 	}
 	mlx5_vdpa_free_resources(&ndev->mvdev);
 	mutex_destroy(&ndev->reslock);
+	kfree(ndev->event_cbs);
+	kfree(ndev->vqs);
 }
 
 static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
@@ -2339,6 +2344,13 @@ static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
 	return -EOPNOTSUPP;
 }
 
+static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
+{
+	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+	return mvdev->actual_features;
+}
+
 static const struct vdpa_config_ops mlx5_vdpa_ops = {
 	.set_vq_address = mlx5_vdpa_set_vq_address,
 	.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2351,8 +2363,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
 	.get_vq_notification = mlx5_get_vq_notification,
 	.get_vq_irq = mlx5_get_vq_irq,
 	.get_vq_align = mlx5_vdpa_get_vq_align,
-	.get_features = mlx5_vdpa_get_features,
-	.set_features = mlx5_vdpa_set_features,
+	.get_device_features = mlx5_vdpa_get_device_features,
+	.set_driver_features = mlx5_vdpa_set_driver_features,
+	.get_driver_features = mlx5_vdpa_get_driver_features,
 	.set_config_cb = mlx5_vdpa_set_config_cb,
 	.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
 	.get_device_id = mlx5_vdpa_get_device_id,
@@ -2545,18 +2558,39 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 		return -EOPNOTSUPP;
 	}
 
-	/* we save one virtqueue for control virtqueue should we require it */
 	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
-	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+	if (max_vqs < 2) {
+		dev_warn(mdev->device,
+			 "%d virtqueues are supported. At least 2 are required\n",
+			 max_vqs);
+		return -EAGAIN;
+	}
+
+	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
+		if (add_config->net.max_vq_pairs > max_vqs / 2)
+			return -EINVAL;
+		max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs);
+	} else {
+		max_vqs = 2;
+	}
 
 	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
 				 name, false);
 	if (IS_ERR(ndev))
 		return PTR_ERR(ndev);
 
+	ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features;
 	ndev->mvdev.max_vqs = max_vqs;
 	mvdev = &ndev->mvdev;
 	mvdev->mdev = mdev;
+
+	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
+	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
+	if (!ndev->vqs || !ndev->event_cbs) {
+		err = -ENOMEM;
+		goto err_alloc;
+	}
+
 	init_mvqs(ndev);
 	mutex_init(&ndev->reslock);
 	config = &ndev->config;
@@ -2612,9 +2646,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 
 	ndev->nb.notifier_call = event_handler;
 	mlx5_notifier_register(mdev, &ndev->nb);
-	ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
 	mvdev->vdev.mdev = &mgtdev->mgtdev;
-	err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
+	err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
 	if (err)
 		goto err_reg;
 
@@ -2634,6 +2667,7 @@ err_mpfs:
 		mlx5_mpfs_del_mac(pfmdev, config->mac);
 err_mtu:
 	mutex_destroy(&ndev->reslock);
+err_alloc:
 	put_device(&mvdev->vdev.dev);
 	return err;
 }
@@ -2676,7 +2710,11 @@ static int mlx5v_probe(struct auxiliary_device *adev,
 	mgtdev->mgtdev.ops = &mdev_ops;
 	mgtdev->mgtdev.device = mdev->device;
 	mgtdev->mgtdev.id_table = id_table;
-	mgtdev->mgtdev.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+	mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
+					  BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+	mgtdev->mgtdev.max_supported_vqs =
+		MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
+	mgtdev->mgtdev.supported_features = get_supported_features(mdev);
 	mgtdev->madev = madev;
 
 	err = vdpa_mgmtdev_register(&mgtdev->mgtdev);

@@ -21,6 +21,14 @@ static LIST_HEAD(mdev_head);
 static DEFINE_MUTEX(vdpa_dev_mutex);
 static DEFINE_IDA(vdpa_index_ida);
 
+void vdpa_set_status(struct vdpa_device *vdev, u8 status)
+{
+	mutex_lock(&vdev->cf_mutex);
+	vdev->config->set_status(vdev, status);
+	mutex_unlock(&vdev->cf_mutex);
+}
+EXPORT_SYMBOL(vdpa_set_status);
+
 static struct genl_family vdpa_nl_family;
 
 static int vdpa_dev_probe(struct device *d)
@@ -52,8 +60,81 @@ static void vdpa_dev_remove(struct device *d)
 	drv->remove(vdev);
 }
 
+static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
+{
+	struct vdpa_device *vdev = dev_to_vdpa(dev);
+
+	/* Check override first, and if set, only use the named driver */
+	if (vdev->driver_override)
+		return strcmp(vdev->driver_override, drv->name) == 0;
+
+	/* Currently devices must be supported by all vDPA bus drivers */
+	return 1;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct vdpa_device *vdev = dev_to_vdpa(dev);
+	const char *driver_override, *old;
+	char *cp;
+
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	driver_override = kstrndup(buf, count, GFP_KERNEL);
+	if (!driver_override)
+		return -ENOMEM;
+
+	cp = strchr(driver_override, '\n');
+	if (cp)
+		*cp = '\0';
+
+	device_lock(dev);
+	old = vdev->driver_override;
+	if (strlen(driver_override)) {
+		vdev->driver_override = driver_override;
+	} else {
+		kfree(driver_override);
+		vdev->driver_override = NULL;
+	}
+	device_unlock(dev);
+
+	kfree(old);
+
+	return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct vdpa_device *vdev = dev_to_vdpa(dev);
+	ssize_t len;
+
+	device_lock(dev);
+	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
+	device_unlock(dev);
+
+	return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *vdpa_dev_attrs[] = {
+	&dev_attr_driver_override.attr,
+	NULL,
+};
+
+static const struct attribute_group vdpa_dev_group = {
+	.attrs = vdpa_dev_attrs,
+};
+__ATTRIBUTE_GROUPS(vdpa_dev);
+
 static struct bus_type vdpa_bus = {
 	.name = "vdpa",
+	.dev_groups = vdpa_dev_groups,
+	.match = vdpa_dev_match,
 	.probe = vdpa_dev_probe,
 	.remove = vdpa_dev_remove,
 };
@@ -68,6 +149,7 @@ static void vdpa_release_dev(struct device *d)
 
 	ida_simple_remove(&vdpa_index_ida, vdev->index);
 	mutex_destroy(&vdev->cf_mutex);
+	kfree(vdev->driver_override);
 	kfree(vdev);
 }
 
@@ -300,6 +382,21 @@ void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
 }
 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
 
+static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
+				     unsigned int offset,
+				     void *buf, unsigned int len)
+{
+	const struct vdpa_config_ops *ops = vdev->config;
+
+	/*
+	 * Config accesses aren't supposed to trigger before features are set.
+	 * If it does happen we assume a legacy guest.
+	 */
+	if (!vdev->features_valid)
+		vdpa_set_features(vdev, 0, true);
+	ops->get_config(vdev, offset, buf, len);
+}
+
 /**
  * vdpa_get_config - Get one or more device configuration fields.
  * @vdev: vdpa device to operate on
@@ -310,16 +407,8 @@ EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
 		     void *buf, unsigned int len)
 {
-	const struct vdpa_config_ops *ops = vdev->config;
-
 	mutex_lock(&vdev->cf_mutex);
-	/*
-	 * Config accesses aren't supposed to trigger before features are set.
-	 * If it does happen we assume a legacy guest.
-	 */
-	if (!vdev->features_valid)
-		vdpa_set_features(vdev, 0);
-	ops->get_config(vdev, offset, buf, len);
+	vdpa_get_config_unlocked(vdev, offset, buf, len);
 	mutex_unlock(&vdev->cf_mutex);
 }
 EXPORT_SYMBOL_GPL(vdpa_get_config);
@@ -414,6 +503,16 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
 		err = -EMSGSIZE;
 		goto msg_err;
 	}
+	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
+			mdev->max_supported_vqs)) {
+		err = -EMSGSIZE;
+		goto msg_err;
+	}
+	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
+			      mdev->supported_features, VDPA_ATTR_PAD)) {
+		err = -EMSGSIZE;
+		goto msg_err;
+	}
 
 	genlmsg_end(msg, hdr);
 	return 0;
@@ -480,8 +579,9 @@ out:
 	return msg->len;
 }
 
-#define VDPA_DEV_NET_ATTRS_MASK ((1 << VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
-				 (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
+#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
+				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
+				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
 
 static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
 {
@@ -500,12 +600,22 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
 	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
 		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
 		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
-		config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
 	}
 	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
 		config.net.mtu =
 			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
-		config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MTU);
+		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
+	}
+	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
+		config.net.max_vq_pairs =
+			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
+		if (!config.net.max_vq_pairs) {
+			NL_SET_ERR_MSG_MOD(info->extack,
+					   "At least one pair of VQs is required");
+			return -EINVAL;
+		}
+		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
 	}
 
 	/* Skip checking capability if user didn't prefer to configure any
@@ -707,7 +817,7 @@ static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
 {
 	u16 val_u16;
 
-	if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0)
+	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
 		return 0;
 
 	val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
@@ -720,7 +830,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
 	u64 features;
 	u16 val_u16;
 
-	vdpa_get_config(vdev, 0, &config, sizeof(config));
+	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
 
 	if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
 		    config.mac))
@@ -734,7 +844,10 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
 	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
 		return -EMSGSIZE;
 
-	features = vdev->config->get_features(vdev);
+	features = vdev->config->get_driver_features(vdev);
+	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
+			      VDPA_ATTR_PAD))
+		return -EMSGSIZE;
 
 	return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
 }
@@ -745,12 +858,23 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
 {
 	u32 device_id;
 	void *hdr;
+	u8 status;
 	int err;
 
+	mutex_lock(&vdev->cf_mutex);
+	status = vdev->config->get_status(vdev);
+	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+		NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
+		err = -EAGAIN;
+		goto out;
+	}
+
 	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
 			  VDPA_CMD_DEV_CONFIG_GET);
-	if (!hdr)
-		return -EMSGSIZE;
+	if (!hdr) {
+		err = -EMSGSIZE;
+		goto out;
+	}
 
 	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
 		err = -EMSGSIZE;
@@ -774,11 +898,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
 	if (err)
 		goto msg_err;
 
+	mutex_unlock(&vdev->cf_mutex);
 	genlmsg_end(msg, hdr);
 	return 0;
 
 msg_err:
 	genlmsg_cancel(msg, hdr);
+out:
+	mutex_unlock(&vdev->cf_mutex);
 	return err;
 }
 

@@ -399,14 +399,14 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
 	return VDPASIM_QUEUE_ALIGN;
 }
 
-static u64 vdpasim_get_features(struct vdpa_device *vdpa)
+static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
 	return vdpasim->dev_attr.supported_features;
 }
 
-static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
+static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
@@ -419,6 +419,13 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
 	return 0;
 }
 
+static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
+{
+	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+	return vdpasim->features;
+}
+
 static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
 				  struct vdpa_callback *cb)
 {
@@ -613,8 +620,9 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
 	.set_vq_state = vdpasim_set_vq_state,
 	.get_vq_state = vdpasim_get_vq_state,
 	.get_vq_align = vdpasim_get_vq_align,
-	.get_features = vdpasim_get_features,
-	.set_features = vdpasim_set_features,
+	.get_device_features = vdpasim_get_device_features,
+	.set_driver_features = vdpasim_set_driver_features,
+	.get_driver_features = vdpasim_get_driver_features,
 	.set_config_cb = vdpasim_set_config_cb,
 	.get_vq_num_max = vdpasim_get_vq_num_max,
 	.get_device_id = vdpasim_get_device_id,
@@ -642,8 +650,9 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
 	.set_vq_state = vdpasim_set_vq_state,
 	.get_vq_state = vdpasim_get_vq_state,
 	.get_vq_align = vdpasim_get_vq_align,
-	.get_features = vdpasim_get_features,
-	.set_features = vdpasim_set_features,
+	.get_device_features = vdpasim_get_device_features,
+	.set_driver_features = vdpasim_set_driver_features,
+	.get_driver_features = vdpasim_get_driver_features,
 	.set_config_cb = vdpasim_set_config_cb,
 	.get_vq_num_max = vdpasim_get_vq_num_max,
 	.get_device_id = vdpasim_get_device_id,

@@ -191,6 +191,8 @@ static struct vdpa_mgmt_dev mgmt_dev = {
 	.ops = &vdpasim_net_mgmtdev_ops,
 	.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
 			     1 << VDPA_ATTR_DEV_NET_CFG_MTU),
+	.max_supported_vqs = VDPASIM_NET_VQ_NUM,
+	.supported_features = VDPASIM_NET_FEATURES,
 };
 
 static int __init vdpasim_net_init(void)

@@ -573,14 +573,14 @@ static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
 	return dev->vq_align;
 }
 
-static u64 vduse_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vduse_vdpa_get_device_features(struct vdpa_device *vdpa)
 {
 	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
 
 	return dev->device_features;
 }
 
-static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vduse_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
 {
 	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
 
@@ -588,6 +588,13 @@ static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
 	return 0;
 }
 
+static u64 vduse_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+	return dev->driver_features;
+}
+
 static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
 				     struct vdpa_callback *cb)
 {
@@ -721,8 +728,9 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
 	.set_vq_state = vduse_vdpa_set_vq_state,
 	.get_vq_state = vduse_vdpa_get_vq_state,
 	.get_vq_align = vduse_vdpa_get_vq_align,
-	.get_features = vduse_vdpa_get_features,
-	.set_features = vduse_vdpa_set_features,
+	.get_device_features = vduse_vdpa_get_device_features,
+	.set_driver_features = vduse_vdpa_set_driver_features,
+	.get_driver_features = vduse_vdpa_get_driver_features,
 	.set_config_cb = vduse_vdpa_set_config_cb,
 	.get_vq_num_max = vduse_vdpa_get_vq_num_max,
 	.get_device_id = vduse_vdpa_get_device_id,
@@ -1357,7 +1365,6 @@
 err_str:
 	vduse_dev_destroy(dev);
 err:
-	kvfree(config_buf);
 	return ret;
 }
 
@@ -1408,6 +1415,8 @@ static long vduse_ioctl(struct file *file, unsigned int cmd,
 		}
 		config.name[VDUSE_NAME_MAX - 1] = '\0';
 		ret = vduse_create_dev(&config, buf, control->api_version);
+		if (ret)
+			kvfree(buf);
 		break;
 	}
 	case VDUSE_DESTROY_DEV: {

@@ -53,14 +53,14 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
 	return &vp_vdpa->mdev;
 }
 
-static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
 {
 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
 
 	return vp_modern_get_features(mdev);
 }
 
-static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
 {
 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
 
@@ -69,6 +69,13 @@ static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
 	return 0;
 }
 
+static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+	return vp_modern_get_driver_features(mdev);
+}
+
 static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
 {
 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -415,8 +422,9 @@ vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
 }
 
 static const struct vdpa_config_ops vp_vdpa_ops = {
-	.get_features = vp_vdpa_get_features,
-	.set_features = vp_vdpa_set_features,
+	.get_device_features = vp_vdpa_get_device_features,
+	.set_driver_features = vp_vdpa_set_driver_features,
+	.get_driver_features = vp_vdpa_get_driver_features,
 	.get_status = vp_vdpa_get_status,
 	.set_status = vp_vdpa_set_status,
 	.reset = vp_vdpa_reset,

@@ -166,6 +166,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_test_flush(n);
+	kfree(n->dev.vqs);
 	kfree(n);
 	return 0;
 }

@@ -170,7 +170,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 	 * Userspace shouldn't remove status bits unless reset the
 	 * status to 0.
 	 */
-	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
+	if (status != 0 && (status_old & ~status) != 0)
 		return -EINVAL;
 
 	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -178,11 +178,11 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 			vhost_vdpa_unsetup_vq_irq(v, i);
 
 	if (status == 0) {
-		ret = ops->reset(vdpa);
+		ret = vdpa_reset(vdpa);
 		if (ret)
 			return ret;
 	} else
-		ops->set_status(vdpa, status);
+		vdpa_set_status(vdpa, status);
 
 	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
 		for (i = 0; i < nvqs; i++)
@@ -195,7 +195,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
 				      struct vhost_vdpa_config *c)
 {
 	struct vdpa_device *vdpa = v->vdpa;
-	long size = vdpa->config->get_config_size(vdpa);
+	size_t size = vdpa->config->get_config_size(vdpa);
 
 	if (c->len == 0 || c->off > size)
 		return -EINVAL;
@@ -262,7 +262,7 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
 	const struct vdpa_config_ops *ops = vdpa->config;
 	u64 features;
 
-	features = ops->get_features(vdpa);
+	features = ops->get_device_features(vdpa);
 
 	if (copy_to_user(featurep, &features, sizeof(features)))
 		return -EFAULT;
@@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
 	if (copy_from_user(&features, featurep, sizeof(features)))
 		return -EFAULT;
 
-	if (vdpa_set_features(vdpa, features))
+	if (vdpa_set_features(vdpa, features, false))
 		return -EINVAL;
 
 	return 0;

@@ -204,6 +204,12 @@ int virtio_finalize_features(struct virtio_device *dev)
 }
 EXPORT_SYMBOL_GPL(virtio_finalize_features);
 
+void virtio_reset_device(struct virtio_device *dev)
+{
+	dev->config->reset(dev);
+}
+EXPORT_SYMBOL_GPL(virtio_reset_device);
+
 static int virtio_dev_probe(struct device *_d)
 {
 	int err, i;
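
The virtio_reset_device() wrapper added above is what all of the driver-side hunks in this series now call instead of poking the config op directly. A typical remove path after the conversion looks like this sketch (the driver name is hypothetical):

	static void mydrv_remove(struct virtio_device *vdev)
	{
		/* Stop all virtqueues before tearing them down. */
		virtio_reset_device(vdev);
		vdev->config->del_vqs(vdev);
	}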
@@ -1056,7 +1056,7 @@ static void remove_common(struct virtio_balloon *vb)
 	return_free_pages_to_mm(vb, ULONG_MAX);
 
 	/* Now we reset the device so we can clean up the queues. */
-	vb->vdev->config->reset(vb->vdev);
+	virtio_reset_device(vb->vdev);
 
 	vb->vdev->config->del_vqs(vb->vdev);
 }

@@ -347,7 +347,7 @@ static void virtinput_remove(struct virtio_device *vdev)
 	spin_unlock_irqrestore(&vi->lock, flags);
 
 	input_unregister_device(vi->idev);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
 		kfree(buf);
 	vdev->config->del_vqs(vdev);

@ -20,6 +20,7 @@
|
|||
#include <linux/mutex.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/log2.h>
|
||||
|
||||
#include <acpi/acpi_numa.h>
|
||||
|
||||
|
@@ -592,7 +593,7 @@ static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
 		return -ENOMEM;
 
 	mutex_lock(&vm->hotplug_mutex);
-	if (new_bitmap)
+	if (vm->sbm.sb_states)
 		memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
 
 	old_bitmap = vm->sbm.sb_states;
@@ -1120,15 +1121,18 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
  */
 static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
 {
-	const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
+	unsigned long order = MAX_ORDER - 1;
 	unsigned long i;
 
 	/*
-	 * We are always called at least with MAX_ORDER_NR_PAGES
-	 * granularity/alignment (e.g., the way subblocks work). All pages
-	 * inside such a block are alike.
+	 * We might get called for ranges that don't cover properly aligned
+	 * MAX_ORDER - 1 pages; however, we can only online properly aligned
+	 * pages with an order of MAX_ORDER - 1 at maximum.
 	 */
-	for (i = 0; i < nr_pages; i += max_nr_pages) {
+	while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
+		order--;
+
+	for (i = 0; i < nr_pages; i += 1 << order) {
 		struct page *page = pfn_to_page(pfn + i);
 
 		/*
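
The rewritten virtio_mem_fake_online() no longer assumes MAX_ORDER - 1 alignment: it lowers the order until both the start pfn and the length are aligned to 1 << order pages. The selection logic in isolation, as a userspace sketch (assuming MAX_ORDER == 11, the common default; IS_ALIGNED is re-derived here for self-containment):

#include <stdio.h>

#define MAX_ORDER 11
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Largest order such that both pfn and nr_pages are (1 << order)-aligned. */
static unsigned long pick_order(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long order = MAX_ORDER - 1;

        while (!IS_ALIGNED(pfn | nr_pages, 1UL << order))
                order--;
        return order;
}

int main(void)
{
        /* pfn = 512, nr_pages = 512 -> order 9, i.e. 2 MiB with 4 KiB pages */
        printf("%lu\n", pick_order(512, 512));
        return 0;
}
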
@@ -1138,14 +1142,12 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
 		 * alike.
 		 */
 		if (PageDirty(page)) {
-			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
-						      false);
-			generic_online_page(page, MAX_ORDER - 1);
+			virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
+			generic_online_page(page, order);
 		} else {
-			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
-						      true);
-			free_contig_range(pfn + i, max_nr_pages);
-			adjust_managed_page_count(page, max_nr_pages);
+			virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
+			free_contig_range(pfn + i, 1 << order);
+			adjust_managed_page_count(page, 1 << order);
 		}
 	}
 }
@@ -1228,28 +1230,46 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
 		page_ref_inc(pfn_to_page(pfn + i));
 }
 
-static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+static void virtio_mem_online_page(struct virtio_mem *vm,
+				   struct page *page, unsigned int order)
 {
-	const unsigned long addr = page_to_phys(page);
-	unsigned long id, sb_id;
-	struct virtio_mem *vm;
+	const unsigned long start = page_to_phys(page);
+	const unsigned long end = start + PFN_PHYS(1 << order);
+	unsigned long addr, next, id, sb_id, count;
 	bool do_online;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
-		if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
-			continue;
+	/*
+	 * We can get called with any order up to MAX_ORDER - 1. If our
+	 * subblock size is smaller than that and we have a mixture of plugged
+	 * and unplugged subblocks within such a page, we have to process in
+	 * smaller granularity. In that case we'll adjust the order exactly once
+	 * within the loop.
+	 */
+	for (addr = start; addr < end; ) {
+		next = addr + PFN_PHYS(1 << order);
 
 		if (vm->in_sbm) {
-			/*
-			 * We exploit here that subblocks have at least
-			 * MAX_ORDER_NR_PAGES size/alignment - so we cannot
-			 * cross subblocks within one call.
-			 */
 			id = virtio_mem_phys_to_mb_id(addr);
 			sb_id = virtio_mem_phys_to_sb_id(vm, addr);
-			do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
-								   sb_id, 1);
+			count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1;
+
+			if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) {
+				/* Fully plugged. */
+				do_online = true;
+			} else if (count == 1 ||
+				   virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) {
+				/* Fully unplugged. */
+				do_online = false;
+			} else {
+				/*
+				 * Mixture, process sub-blocks instead. This
+				 * will be at least the size of a pageblock.
+				 * We'll run into this case exactly once.
+				 */
+				order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT;
+				do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1);
+				continue;
+			}
 		} else {
 			/*
 			 * If the whole block is marked fake offline, keep
@@ -1260,18 +1280,38 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
 				VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
 		}
 
+		if (do_online)
+			generic_online_page(pfn_to_page(PFN_DOWN(addr)), order);
+		else
+			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
+						    false);
+		addr = next;
+	}
+}
+
+static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+{
+	const unsigned long addr = page_to_phys(page);
+	struct virtio_mem *vm;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
 		/*
-		 * virtio_mem_set_fake_offline() might sleep, we don't need
-		 * the device anymore. See virtio_mem_remove() how races
+		 * Pages we're onlining will never cross memory blocks and,
+		 * therefore, not virtio-mem devices.
+		 */
+		if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
+			continue;
+
+		/*
+		 * virtio_mem_set_fake_offline() might sleep. We can safely
+		 * drop the RCU lock at this point because the device
+		 * cannot go away. See virtio_mem_remove() how races
 		 * between memory onlining and device removal are handled.
 		 */
 		rcu_read_unlock();
 
-		if (do_online)
-			generic_online_page(page, order);
-		else
-			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
-						    false);
+		virtio_mem_online_page(vm, page, order);
 		return;
 	}
 	rcu_read_unlock();
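
In virtio_mem_online_page(), count is the number of subblocks the current [addr, next) range touches: the subblock id of the last byte minus the id of the first, plus one. The arithmetic in isolation, as a standalone sketch (sb_size is assumed to be a power of two; helper name is hypothetical):

#include <stdio.h>

/* How many sb_size-byte subblocks does the range [addr, next) touch? */
static unsigned long sb_span(unsigned long addr, unsigned long next,
                             unsigned long sb_size)
{
        /* id of the last byte minus id of the first byte, plus one */
        return (next - 1) / sb_size - addr / sb_size + 1;
}

int main(void)
{
        /* a 4 MiB range over 2 MiB subblocks touches exactly 2 of them */
        printf("%lu\n", sb_span(0x200000, 0x600000, 0x200000));
        return 0;
}
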
@@ -2438,8 +2478,6 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
 	/*
 	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
 	 * pageblock_nr_pages pages. This:
-	 * - Simplifies our page onlining code (virtio_mem_online_page_cb)
-	 *   and fake page onlining code (virtio_mem_fake_online).
 	 * - Is required for now for alloc_contig_range() to work reliably -
 	 *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
 	 */
@@ -2850,7 +2888,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
 	virtio_mem_deinit_hotplug(vm);
 
 	/* reset the device and cleanup the queues */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 
 	kfree(vm);
@@ -138,7 +138,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
 	if (q_pfn >> 32) {
 		dev_err(&vp_dev->pci_dev->dev,
-			"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+			"platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n",
 			0x1ULL << (32 + PAGE_SHIFT - 30));
 		err = -E2BIG;
 		goto out_del_vq;
@@ -45,8 +45,10 @@ int vp_legacy_probe(struct virtio_pci_legacy_device *ldev)
 		return rc;
 
 	ldev->ioaddr = pci_iomap(pci_dev, 0, 0);
-	if (!ldev->ioaddr)
+	if (!ldev->ioaddr) {
+		rc = -EIO;
 		goto err_iomap;
+	}
 
 	ldev->isr = ldev->ioaddr + VIRTIO_PCI_ISR;
 
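
The vp_legacy_probe() fix is a classic unwind-path bug: on pci_iomap() failure the code jumped to the cleanup label with rc still holding the 0 from the previous successful call, so the probe reported success. The shape of the fix as a self-contained sketch (the stub names are hypothetical, used only to make the idiom runnable):

#include <errno.h>

static void *iomap_stub(void) { return (void *)0; /* pretend the map fails */ }
static void unwind(void) { /* undo earlier setup */ }

static int probe_sketch(void)
{
        int rc = 0;     /* 0 from the last successful step */
        void *ioaddr = iomap_stub();

        if (!ioaddr) {
                rc = -EIO;      /* previously left at 0 -> bogus success */
                goto err_iomap;
        }
        return 0;

err_iomap:
        unwind();
        return rc;
}

int main(void) { return probe_sketch() == -EIO ? 0 : 1; }
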
@@ -345,7 +345,7 @@ err_map_common:
 EXPORT_SYMBOL_GPL(vp_modern_probe);
 
 /*
- * vp_modern_probe: remove and cleanup the modern virtio pci device
+ * vp_modern_remove: remove and cleanup the modern virtio pci device
  * @mdev: the modern virtio-pci device
  */
 void vp_modern_remove(struct virtio_pci_modern_device *mdev)
@@ -1197,8 +1197,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	if (virtqueue_use_indirect(_vq, total_sg)) {
 		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
 						    in_sgs, data, gfp);
-		if (err != -ENOMEM)
+		if (err != -ENOMEM) {
+			END_USE(vq);
 			return err;
+		}
 
 		/* fall back on direct */
 	}
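
The virtqueue_add_packed() change fixes DEBUG-build bookkeeping: START_USE()/END_USE() must pair up on every exit path, and the early return for a non-ENOMEM indirect failure skipped the END_USE(). The invariant, modeled with a plain counter instead of the kernel's DEBUG macros (everything here is a stand-in):

#include <assert.h>
#include <errno.h>

static int in_use;      /* stands in for the DEBUG in_use flag */
#define START_USE()     (in_use++)
#define END_USE()       do { assert(in_use > 0); in_use--; } while (0)

static int add_indirect(void) { return -EINVAL; /* pretend: not -ENOMEM */ }

static int add_packed(void)
{
        int err;

        START_USE();
        err = add_indirect();
        if (err != -ENOMEM) {
                END_USE();      /* the call the patch adds */
                return err;
        }
        /* ... fall back to the direct path, which also ends with END_USE() */
        END_USE();
        return 0;
}

int main(void) { add_packed(); return in_use; /* must be 0 */ }
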
@@ -91,9 +91,8 @@ static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
 static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
 {
 	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
-	const struct vdpa_config_ops *ops = vdpa->config;
 
-	return ops->set_status(vdpa, status);
+	return vdpa_set_status(vdpa, status);
 }
 
 static void virtio_vdpa_reset(struct virtio_device *vdev)
@@ -308,7 +307,7 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
 	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 	const struct vdpa_config_ops *ops = vdpa->config;
 
-	return ops->get_features(vdpa);
+	return ops->get_device_features(vdpa);
 }
 
 static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
@@ -318,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
 	/* Give virtio_ring a chance to accept features. */
 	vring_transport_features(vdev);
 
-	return vdpa_set_features(vdpa, vdev->features);
+	return vdpa_set_features(vdpa, vdev->features, false);
 }
 
 static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
@@ -891,7 +891,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
 	return 0;
 
 out_vqs:
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	virtio_fs_cleanup_vqs(vdev, fs);
 	kfree(fs->vqs);
@@ -923,7 +923,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
 	list_del_init(&fs->list);
 	virtio_fs_stop_all_queues(fs);
 	virtio_fs_drain_all_queues_locked(fs);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	virtio_fs_cleanup_vqs(vdev, fs);
 
 	vdev->priv = NULL;
@@ -64,6 +64,7 @@ struct vdpa_mgmt_dev;
  * struct vdpa_device - representation of a vDPA device
  * @dev: underlying device
  * @dma_dev: the actual device that is performing DMA
+ * @driver_override: driver name to force a match
  * @config: the configuration ops for this device.
  * @cf_mutex: Protects get and set access to configuration layout.
  * @index: device index
@@ -76,6 +77,7 @@ struct vdpa_mgmt_dev;
 struct vdpa_device {
 	struct device dev;
 	struct device *dma_dev;
+	const char *driver_override;
 	const struct vdpa_config_ops *config;
 	struct mutex cf_mutex; /* Protects get/set config */
 	unsigned int index;
@@ -99,6 +101,7 @@ struct vdpa_dev_set_config {
 	struct {
 		u8 mac[ETH_ALEN];
 		u16 mtu;
+		u16 max_vq_pairs;
 	} net;
 	u64 mask;
 };
@@ -155,7 +158,7 @@ struct vdpa_map_file {
  *				@vdev: vdpa device
  *				@idx: virtqueue index
  *				@state: pointer to returned state (last_avail_idx)
- * @get_vq_notification:	Get the notification area for a virtqueue
+ * @get_vq_notification:	Get the notification area for a virtqueue (optional)
  *				@vdev: vdpa device
  *				@idx: virtqueue index
  *				Returns the notifcation area
@@ -169,14 +172,17 @@ struct vdpa_map_file {
  *				for the device
  *				@vdev: vdpa device
  *				Returns virtqueue algin requirement
- * @get_features:		Get virtio features supported by the device
+ * @get_device_features:	Get virtio features supported by the device
  *				@vdev: vdpa device
  *				Returns the virtio features support by the
  *				device
- * @set_features:		Set virtio features supported by the driver
+ * @set_driver_features:	Set virtio features supported by the driver
  *				@vdev: vdpa device
  *				@features: feature support by the driver
  *				Returns integer: success (0) or error (< 0)
+ * @get_driver_features:	Get the virtio driver features in action
+ *				@vdev: vdpa device
+ *				Returns the virtio features accepted
  * @set_config_cb:		Set the config interrupt callback
  *				@vdev: vdpa device
  *				@cb: virtio-vdev interrupt callback structure
@@ -276,8 +282,9 @@ struct vdpa_config_ops {
 
 	/* Device ops */
 	u32 (*get_vq_align)(struct vdpa_device *vdev);
-	u64 (*get_features)(struct vdpa_device *vdev);
-	int (*set_features)(struct vdpa_device *vdev, u64 features);
+	u64 (*get_device_features)(struct vdpa_device *vdev);
+	int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
+	u64 (*get_driver_features)(struct vdpa_device *vdev);
 	void (*set_config_cb)(struct vdpa_device *vdev,
 			      struct vdpa_callback *cb);
 	u16 (*get_vq_num_max)(struct vdpa_device *vdev);
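
The renamed ops make the two feature sets unambiguous: get_device_features() reports what the device offers, while set_driver_features()/get_driver_features() deal with what the driver actually negotiated. A minimal sketch of negotiation against these ops (in the real code this is routed through vdpa_set_features() so cf_mutex is taken; the helper name and driver_supported value here are hypothetical):

#include <linux/vdpa.h>

/* Hypothetical: intersect the device's offer with what we support. */
static int example_negotiate(struct vdpa_device *vdev, u64 driver_supported)
{
        const struct vdpa_config_ops *ops = vdev->config;
        u64 features = ops->get_device_features(vdev) & driver_supported;

        return ops->set_driver_features(vdev, features);
}
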
@@ -385,23 +392,37 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
 static inline int vdpa_reset(struct vdpa_device *vdev)
 {
 	const struct vdpa_config_ops *ops = vdev->config;
+	int ret;
 
+	mutex_lock(&vdev->cf_mutex);
 	vdev->features_valid = false;
-	return ops->reset(vdev);
+	ret = ops->reset(vdev);
+	mutex_unlock(&vdev->cf_mutex);
+	return ret;
 }
 
-static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features, bool locked)
 {
 	const struct vdpa_config_ops *ops = vdev->config;
+	int ret;
 
+	if (!locked)
+		mutex_lock(&vdev->cf_mutex);
+
 	vdev->features_valid = true;
-	return ops->set_features(vdev, features);
+	ret = ops->set_driver_features(vdev, features);
+	if (!locked)
+		mutex_unlock(&vdev->cf_mutex);
+
+	return ret;
 }
 
 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
 		     void *buf, unsigned int len);
+void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
+		     const void *buf, unsigned int length);
+void vdpa_set_status(struct vdpa_device *vdev, u8 status);
 
 /**
  * struct vdpa_mgmtdev_ops - vdpa device ops
  * @dev_add: Add a vdpa device using alloc and register
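
The locked parameter lets one helper serve both callers that already hold cf_mutex (they pass true) and callers that don't (false), avoiding a self-deadlock on the non-recursive mutex. The idiom in isolation, as a userspace pthread sketch (all names here are stand-ins, not kernel API):

#include <pthread.h>

struct dev {
        pthread_mutex_t cf_mutex;
        unsigned long long features;
        int features_valid;
};

/* Take cf_mutex only when the caller does not already hold it. */
static int set_features(struct dev *d, unsigned long long features, int locked)
{
        if (!locked)
                pthread_mutex_lock(&d->cf_mutex);

        d->features_valid = 1;
        d->features = features; /* stands in for ops->set_driver_features() */

        if (!locked)
                pthread_mutex_unlock(&d->cf_mutex);
        return 0;
}

int main(void)
{
        struct dev d = { .cf_mutex = PTHREAD_MUTEX_INITIALIZER };

        set_features(&d, 1ULL << 32, 0);        /* caller without the lock */
        return 0;
}
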
@@ -438,6 +459,8 @@ struct vdpa_mgmt_dev {
 	const struct virtio_device_id *id_table;
 	u64 config_attr_mask;
 	struct list_head list;
+	u64 supported_features;
+	u32 max_supported_vqs;
 };
 
 int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
@@ -138,6 +138,7 @@ int virtio_finalize_features(struct virtio_device *dev);
 int virtio_device_freeze(struct virtio_device *dev);
 int virtio_device_restore(struct virtio_device *dev);
 #endif
+void virtio_reset_device(struct virtio_device *dev);
 
 size_t virtio_max_dma_size(struct virtio_device *vdev);
@@ -23,6 +23,9 @@ enum vdpa_command {
 enum vdpa_attr {
 	VDPA_ATTR_UNSPEC,
 
+	/* Pad attribute for 64b alignment */
+	VDPA_ATTR_PAD = VDPA_ATTR_UNSPEC,
+
 	/* bus name (optional) + dev name together make the parent device handle */
 	VDPA_ATTR_MGMTDEV_BUS_NAME,		/* string */
 	VDPA_ATTR_MGMTDEV_DEV_NAME,		/* string */
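
VDPA_ATTR_PAD exists so that u64 netlink attributes can be emitted 8-byte aligned: nla_put_u64_64bit() inserts a pad attribute of that type in front of the payload when needed. A sketch of a kernel-side fill using the new attributes (the function name is hypothetical; the netlink call and attribute names are real):

#include <net/netlink.h>
#include <uapi/linux/vdpa.h>

/* Hypothetical helper: emit the negotiated features, 8-byte aligned. */
static int example_fill_features(struct sk_buff *msg, u64 features)
{
        return nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
                                 features, VDPA_ATTR_PAD);
}
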
@@ -40,6 +43,9 @@ enum vdpa_attr {
 	VDPA_ATTR_DEV_NET_CFG_MAX_VQP,		/* u16 */
 	VDPA_ATTR_DEV_NET_CFG_MTU,		/* u16 */
 
+	VDPA_ATTR_DEV_NEGOTIATED_FEATURES,	/* u64 */
+	VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,		/* u32 */
+	VDPA_ATTR_DEV_SUPPORTED_FEATURES,	/* u64 */
 	/* new attributes must be added above here */
 	VDPA_ATTR_MAX,
 };
@@ -721,7 +721,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 
 	mutex_unlock(&virtio_9p_lock);
 
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 	vdev->config->del_vqs(vdev);
 
 	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
@@ -665,7 +665,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
 
 	/* Stop all work handlers to make sure no one is accessing the device,
-	 * so we can safely call vdev->config->reset().
+	 * so we can safely call virtio_reset_device().
 	 */
 	mutex_lock(&vsock->rx_lock);
 	vsock->rx_run = false;
@@ -682,7 +682,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	/* Flush all device writes and interrupts, device will not use any
 	 * more buffers.
 	 */
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	mutex_lock(&vsock->rx_lock);
 	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
@@ -350,7 +350,7 @@ static void virtsnd_remove(struct virtio_device *vdev)
 	snd_card_free(snd->card);
 
 	vdev->config->del_vqs(vdev);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	for (i = 0; snd->substreams && i < snd->nsubstreams; ++i) {
 		struct virtio_pcm_substream *vss = &snd->substreams[i];
@@ -379,7 +379,7 @@ static int virtsnd_freeze(struct virtio_device *vdev)
 	virtsnd_ctl_msg_cancel_all(snd);
 
 	vdev->config->del_vqs(vdev);
-	vdev->config->reset(vdev);
+	virtio_reset_device(vdev);
 
 	for (i = 0; i < snd->nsubstreams; ++i)
 		cancel_work_sync(&snd->substreams[i].elapsed_period);