2020-03-26 22:01:21 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef _LINUX_VDPA_H
|
|
|
|
#define _LINUX_VDPA_H
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/vhost_iotlb.h>
|
2021-10-27 01:55:15 +08:00
|
|
|
#include <linux/virtio_net.h>
|
|
|
|
#include <linux/if_ether.h>
|
2020-03-26 22:01:21 +08:00
|
|
|
|
|
|
|
/**
|
2021-04-07 01:04:44 +08:00
|
|
|
* struct vdpa_calllback - vDPA callback definition.
|
2020-03-26 22:01:21 +08:00
|
|
|
* @callback: interrupt callback function
|
|
|
|
* @private: the data passed to the callback function
|
|
|
|
*/
|
|
|
|
struct vdpa_callback {
|
|
|
|
irqreturn_t (*callback)(void *data);
|
|
|
|
void *private;
|
|
|
|
};
|
|
|
|
|
2020-05-29 16:03:00 +08:00
|
|
|
/**
|
2021-04-07 01:04:44 +08:00
|
|
|
* struct vdpa_notification_area - vDPA notification area
|
2020-05-29 16:03:00 +08:00
|
|
|
* @addr: base address of the notification area
|
|
|
|
* @size: size of the notification area
|
|
|
|
*/
|
|
|
|
struct vdpa_notification_area {
|
|
|
|
resource_size_t addr;
|
|
|
|
resource_size_t size;
|
|
|
|
};
|
|
|
|
|
2020-08-05 00:20:43 +08:00
|
|
|
/**
|
2021-06-02 10:15:33 +08:00
|
|
|
* struct vdpa_vq_state_split - vDPA split virtqueue state
|
2020-08-05 00:20:43 +08:00
|
|
|
* @avail_index: available index
|
|
|
|
*/
|
2021-06-02 10:15:33 +08:00
|
|
|
struct vdpa_vq_state_split {
|
2020-08-05 00:20:43 +08:00
|
|
|
u16 avail_index;
|
|
|
|
};
|
|
|
|
|
2021-06-02 10:15:33 +08:00
|
|
|
/**
|
|
|
|
* struct vdpa_vq_state_packed - vDPA packed virtqueue state
|
|
|
|
* @last_avail_counter: last driver ring wrap counter observed by device
|
|
|
|
* @last_avail_idx: device available index
|
|
|
|
* @last_used_counter: device ring wrap counter
|
|
|
|
* @last_used_idx: used index
|
|
|
|
*/
|
|
|
|
struct vdpa_vq_state_packed {
|
2021-08-31 18:36:25 +08:00
|
|
|
u16 last_avail_counter:1;
|
|
|
|
u16 last_avail_idx:15;
|
|
|
|
u16 last_used_counter:1;
|
|
|
|
u16 last_used_idx:15;
|
2021-06-02 10:15:33 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct vdpa_vq_state {
|
2021-08-31 18:36:25 +08:00
|
|
|
union {
|
|
|
|
struct vdpa_vq_state_split split;
|
|
|
|
struct vdpa_vq_state_packed packed;
|
|
|
|
};
|
2021-06-02 10:15:33 +08:00
|
|
|
};
|
|
|
|
|
2021-01-05 18:32:00 +08:00
|
|
|
struct vdpa_mgmt_dev;
|
|
|
|
|
2020-03-26 22:01:21 +08:00
|
|
|
/**
|
2021-04-07 01:04:44 +08:00
|
|
|
* struct vdpa_device - representation of a vDPA device
|
2020-03-26 22:01:21 +08:00
|
|
|
* @dev: underlying device
|
|
|
|
* @dma_dev: the actual device that is performing DMA
|
2022-04-19 19:34:31 +08:00
|
|
|
* @driver_override: driver name to force a match; do not set directly,
|
|
|
|
* because core frees it; use driver_set_override() to
|
|
|
|
* set or clear it.
|
2020-03-26 22:01:21 +08:00
|
|
|
* @config: the configuration ops for this device.
|
2022-05-18 21:38:02 +08:00
|
|
|
* @cf_lock: Protects get and set access to configuration layout.
|
2020-03-26 22:01:21 +08:00
|
|
|
* @index: device index
|
2020-07-27 22:51:55 +08:00
|
|
|
* @features_valid: were features initialized? for legacy guests
|
2022-03-31 02:03:46 +08:00
|
|
|
* @ngroups: the number of virtqueue groups
|
|
|
|
* @nas: the number of address spaces
|
2021-08-31 18:36:31 +08:00
|
|
|
* @use_va: indicate whether virtual address must be used by this device
|
2020-11-12 14:39:59 +08:00
|
|
|
* @nvqs: maximum number of supported virtqueues
|
2021-01-05 18:32:01 +08:00
|
|
|
* @mdev: management device pointer; caller must setup when registering device as part
|
|
|
|
* of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
|
2020-03-26 22:01:21 +08:00
|
|
|
*/
|
|
|
|
struct vdpa_device {
|
|
|
|
struct device dev;
|
|
|
|
struct device *dma_dev;
|
2021-11-27 00:47:53 +08:00
|
|
|
const char *driver_override;
|
2020-03-26 22:01:21 +08:00
|
|
|
const struct vdpa_config_ops *config;
|
2022-05-18 21:38:02 +08:00
|
|
|
struct rw_semaphore cf_lock; /* Protects get/set config */
|
2020-03-26 22:01:21 +08:00
|
|
|
unsigned int index;
|
2020-07-27 22:51:55 +08:00
|
|
|
bool features_valid;
|
2021-08-31 18:36:31 +08:00
|
|
|
bool use_va;
|
2022-03-15 11:25:52 +08:00
|
|
|
u32 nvqs;
|
2021-01-05 18:32:01 +08:00
|
|
|
struct vdpa_mgmt_dev *mdev;
|
2022-03-31 02:03:45 +08:00
|
|
|
unsigned int ngroups;
|
2022-03-31 02:03:46 +08:00
|
|
|
unsigned int nas;
|
2020-03-26 22:01:21 +08:00
|
|
|
};
|
|
|
|
|
2020-10-23 17:00:41 +08:00
|
|
|
/**
|
2021-04-07 01:04:44 +08:00
|
|
|
* struct vdpa_iova_range - the IOVA range support by the device
|
2020-10-23 17:00:41 +08:00
|
|
|
* @first: start of the IOVA range
|
|
|
|
* @last: end of the IOVA range
|
|
|
|
*/
|
|
|
|
struct vdpa_iova_range {
|
|
|
|
u64 first;
|
|
|
|
u64 last;
|
|
|
|
};
|
|
|
|
|
2021-10-27 01:55:15 +08:00
|
|
|
struct vdpa_dev_set_config {
|
2022-09-27 15:48:08 +08:00
|
|
|
u64 device_features;
|
2021-10-27 01:55:15 +08:00
|
|
|
struct {
|
|
|
|
u8 mac[ETH_ALEN];
|
|
|
|
u16 mtu;
|
2022-01-05 19:46:37 +08:00
|
|
|
u16 max_vq_pairs;
|
2021-10-27 01:55:15 +08:00
|
|
|
} net;
|
|
|
|
u64 mask;
|
|
|
|
};
|
|
|
|
|
2021-08-31 18:36:31 +08:00
|
|
|
/**
|
|
|
|
* Corresponding file area for device memory mapping
|
|
|
|
* @file: vma->vm_file for the mapping
|
|
|
|
* @offset: mapping offset in the vm_file
|
|
|
|
*/
|
|
|
|
struct vdpa_map_file {
|
|
|
|
struct file *file;
|
|
|
|
u64 offset;
|
|
|
|
};
|
|
|
|
|
2020-03-26 22:01:21 +08:00
|
|
|
/**
|
2021-04-07 01:04:44 +08:00
|
|
|
* struct vdpa_config_ops - operations for configuring a vDPA device.
|
2020-03-26 22:01:21 +08:00
|
|
|
* Note: vDPA device drivers are required to implement all of the
|
|
|
|
* operations unless it is mentioned to be optional in the following
|
|
|
|
* list.
|
|
|
|
*
|
|
|
|
* @set_vq_address: Set the address of virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* @desc_area: address of desc area
|
|
|
|
* @driver_area: address of driver area
|
|
|
|
* @device_area: address of device area
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
|
|
|
* @set_vq_num: Set the size of virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* @num: the size of virtqueue
|
|
|
|
* @kick_vq: Kick the virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* @set_vq_cb: Set the interrupt callback function for
|
|
|
|
* a virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* @cb: virtio-vdev interrupt callback structure
|
|
|
|
* @set_vq_ready: Set ready status for a virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* @ready: ready (true) not ready(false)
|
|
|
|
* @get_vq_ready: Get ready status for a virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* Returns boolean: ready (true) or not (false)
|
|
|
|
* @set_vq_state: Set the state for a virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
2020-08-05 00:20:43 +08:00
|
|
|
* @state: pointer to set virtqueue state (last_avail_idx)
|
2020-03-26 22:01:21 +08:00
|
|
|
* Returns integer: success (0) or error (< 0)
|
|
|
|
* @get_vq_state: Get the state for a virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
2020-08-05 00:20:43 +08:00
|
|
|
* @state: pointer to returned state (last_avail_idx)
|
2021-11-05 03:52:48 +08:00
|
|
|
* @get_vq_notification: Get the notification area for a virtqueue (optional)
|
2020-05-29 16:03:00 +08:00
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* Returns the notifcation area
|
2020-07-31 14:55:30 +08:00
|
|
|
* @get_vq_irq: Get the irq number of a virtqueue (optional,
|
|
|
|
* but must implemented if require vq irq offloading)
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* Returns int: irq number of a virtqueue,
|
|
|
|
* negative number if no irq assigned.
|
2020-03-26 22:01:21 +08:00
|
|
|
* @get_vq_align: Get the virtqueue align requirement
|
|
|
|
* for the device
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns virtqueue algin requirement
|
2022-06-09 12:19:01 +08:00
|
|
|
* @get_vq_group: Get the group id for a specific
|
|
|
|
* virtqueue (optional)
|
2022-03-31 02:03:45 +08:00
|
|
|
* @vdev: vdpa device
|
|
|
|
* @idx: virtqueue index
|
|
|
|
* Returns u32: group id for this virtqueue
|
2022-01-05 19:46:33 +08:00
|
|
|
* @get_device_features: Get virtio features supported by the device
|
2020-03-26 22:01:21 +08:00
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns the virtio features support by the
|
|
|
|
* device
|
2022-01-05 19:46:33 +08:00
|
|
|
* @set_driver_features: Set virtio features supported by the driver
|
2020-03-26 22:01:21 +08:00
|
|
|
* @vdev: vdpa device
|
|
|
|
* @features: feature support by the driver
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
2022-01-05 19:46:33 +08:00
|
|
|
* @get_driver_features: Get the virtio driver features in action
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns the virtio features accepted
|
2020-03-26 22:01:21 +08:00
|
|
|
* @set_config_cb: Set the config interrupt callback
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @cb: virtio-vdev interrupt callback structure
|
|
|
|
* @get_vq_num_max: Get the max size of virtqueue
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns u16: max size of virtqueue
|
2021-10-29 17:14:45 +08:00
|
|
|
* @get_vq_num_min: Get the min size of virtqueue (optional)
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns u16: min size of virtqueue
|
2020-03-26 22:01:21 +08:00
|
|
|
* @get_device_id: Get virtio device id
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns u32: virtio device id
|
|
|
|
* @get_vendor_id: Get id for the vendor that provides this device
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns u32: virtio vendor id
|
|
|
|
* @get_status: Get the device status
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns u8: virtio device status
|
|
|
|
* @set_status: Set the device status
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @status: virtio device status
|
2021-08-31 18:36:26 +08:00
|
|
|
* @reset: Reset device
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
2022-08-11 01:15:09 +08:00
|
|
|
* @suspend: Suspend or resume the device (optional)
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
2022-03-15 11:25:51 +08:00
|
|
|
* @get_config_size: Get the size of the configuration space includes
|
|
|
|
* fields that are conditional on feature bits.
|
2021-03-16 00:34:44 +08:00
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns size_t: configuration size
|
2020-03-26 22:01:21 +08:00
|
|
|
* @get_config: Read from device specific configuration space
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @offset: offset from the beginning of
|
|
|
|
* configuration space
|
|
|
|
* @buf: buffer used to read to
|
|
|
|
* @len: the length to read from
|
|
|
|
* configuration space
|
|
|
|
* @set_config: Write to device specific configuration space
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* @offset: offset from the beginning of
|
|
|
|
* configuration space
|
|
|
|
* @buf: buffer used to write from
|
|
|
|
* @len: the length to write to
|
|
|
|
* configuration space
|
|
|
|
* @get_generation: Get device config generation (optional)
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns u32: device generation
|
2020-10-23 17:00:41 +08:00
|
|
|
* @get_iova_range: Get supported iova range (optional)
|
|
|
|
* @vdev: vdpa device
|
|
|
|
* Returns the iova range supported by
|
|
|
|
* the device.
|
2022-03-31 02:03:47 +08:00
|
|
|
* @set_group_asid: Set address space identifier for a
|
2022-06-09 12:19:01 +08:00
|
|
|
* virtqueue group (optional)
|
2022-03-31 02:03:47 +08:00
|
|
|
* @vdev: vdpa device
|
|
|
|
* @group: virtqueue group
|
|
|
|
* @asid: address space id for this group
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
2020-03-26 22:01:21 +08:00
|
|
|
* @set_map: Set device memory mapping (optional)
|
|
|
|
* Needed for device that using device
|
|
|
|
* specific DMA translation (on-chip IOMMU)
|
|
|
|
* @vdev: vdpa device
|
2022-03-31 02:03:46 +08:00
|
|
|
* @asid: address space identifier
|
2020-03-26 22:01:21 +08:00
|
|
|
* @iotlb: vhost memory mapping to be
|
|
|
|
* used by the vDPA
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
|
|
|
* @dma_map: Map an area of PA to IOVA (optional)
|
|
|
|
* Needed for device that using device
|
|
|
|
* specific DMA translation (on-chip IOMMU)
|
|
|
|
* and preferring incremental map.
|
|
|
|
* @vdev: vdpa device
|
2022-03-31 02:03:46 +08:00
|
|
|
* @asid: address space identifier
|
2020-03-26 22:01:21 +08:00
|
|
|
* @iova: iova to be mapped
|
|
|
|
* @size: size of the area
|
|
|
|
* @pa: physical address for the map
|
|
|
|
* @perm: device access permission (VHOST_MAP_XX)
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
|
|
|
* @dma_unmap: Unmap an area of IOVA (optional but
|
|
|
|
* must be implemented with dma_map)
|
|
|
|
* Needed for device that using device
|
|
|
|
* specific DMA translation (on-chip IOMMU)
|
|
|
|
* and preferring incremental unmap.
|
|
|
|
* @vdev: vdpa device
|
2022-03-31 02:03:46 +08:00
|
|
|
* @asid: address space identifier
|
2020-03-26 22:01:21 +08:00
|
|
|
* @iova: iova to be unmapped
|
|
|
|
* @size: size of the area
|
|
|
|
* Returns integer: success (0) or error (< 0)
|
|
|
|
* @free: Free resources that belongs to vDPA (optional)
|
|
|
|
* @vdev: vdpa device
|
|
|
|
*/
|
|
|
|
struct vdpa_config_ops {
|
|
|
|
/* Virtqueue ops */
|
|
|
|
int (*set_vq_address)(struct vdpa_device *vdev,
|
|
|
|
u16 idx, u64 desc_area, u64 driver_area,
|
|
|
|
u64 device_area);
|
|
|
|
void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
|
|
|
|
void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
|
|
|
|
void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
|
|
|
|
struct vdpa_callback *cb);
|
|
|
|
void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
|
|
|
|
bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
|
2020-08-05 00:20:43 +08:00
|
|
|
int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
|
|
|
|
const struct vdpa_vq_state *state);
|
2020-08-05 00:20:44 +08:00
|
|
|
int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
|
|
|
|
struct vdpa_vq_state *state);
|
vdpa: Add support for querying vendor statistics
Allows to read vendor statistics of a vdpa device. The specific
statistics data are received from the upstream driver in the form of an
(attribute name, attribute value) pairs.
An example of statistics for mlx5_vdpa device are:
received_desc - number of descriptors received by the virtqueue
completed_desc - number of descriptors completed by the virtqueue
A descriptor using indirect buffers is still counted as 1. In addition,
N chained descriptors are counted correctly N times as one would expect.
A new callback was added to vdpa_config_ops which provides the means for
the vdpa driver to return statistics results.
The interface allows for reading all the supported virtqueues, including
the control virtqueue if it exists.
Below are some examples taken from mlx5_vdpa which are introduced in the
following patch:
1. Read statistics for the virtqueue at index 1
$ vdpa dev vstats show vdpa-a qidx 1
vdpa-a:
queue_type tx queue_index 1 received_desc 3844836 completed_desc 3844836
2. Read statistics for the virtqueue at index 32
$ vdpa dev vstats show vdpa-a qidx 32
vdpa-a:
queue_type control_vq queue_index 32 received_desc 62 completed_desc 62
3. Read statisitics for the virtqueue at index 0 with json output
$ vdpa -j dev vstats show vdpa-a qidx 0
{"vstats":{"vdpa-a":{
"queue_type":"rx","queue_index":0,"name":"received_desc","value":417776,\
"name":"completed_desc","value":417548}}}
4. Read statistics for the virtqueue at index 0 with preety json output
$ vdpa -jp dev vstats show vdpa-a qidx 0
{
"vstats": {
"vdpa-a": {
"queue_type": "rx",
"queue_index": 0,
"name": "received_desc",
"value": 417776,
"name": "completed_desc",
"value": 417548
}
}
}
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20220518133804.1075129-3-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2022-05-18 21:38:00 +08:00
|
|
|
int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
|
|
|
|
struct sk_buff *msg,
|
|
|
|
struct netlink_ext_ack *extack);
|
2020-05-29 16:03:00 +08:00
|
|
|
struct vdpa_notification_area
|
|
|
|
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
|
2020-08-04 18:21:23 +08:00
|
|
|
/* vq irq is not expected to be changed once DRIVER_OK is set */
|
2021-10-29 17:14:43 +08:00
|
|
|
int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
|
2020-03-26 22:01:21 +08:00
|
|
|
|
|
|
|
/* Device ops */
|
2020-04-10 04:26:21 +08:00
|
|
|
u32 (*get_vq_align)(struct vdpa_device *vdev);
|
2022-03-31 02:03:45 +08:00
|
|
|
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
|
2022-01-05 19:46:33 +08:00
|
|
|
u64 (*get_device_features)(struct vdpa_device *vdev);
|
|
|
|
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
|
|
|
|
u64 (*get_driver_features)(struct vdpa_device *vdev);
|
2020-03-26 22:01:21 +08:00
|
|
|
void (*set_config_cb)(struct vdpa_device *vdev,
|
|
|
|
struct vdpa_callback *cb);
|
|
|
|
u16 (*get_vq_num_max)(struct vdpa_device *vdev);
|
2021-10-29 17:14:45 +08:00
|
|
|
u16 (*get_vq_num_min)(struct vdpa_device *vdev);
|
2020-03-26 22:01:21 +08:00
|
|
|
u32 (*get_device_id)(struct vdpa_device *vdev);
|
|
|
|
u32 (*get_vendor_id)(struct vdpa_device *vdev);
|
|
|
|
u8 (*get_status)(struct vdpa_device *vdev);
|
|
|
|
void (*set_status)(struct vdpa_device *vdev, u8 status);
|
2021-08-31 18:36:26 +08:00
|
|
|
int (*reset)(struct vdpa_device *vdev);
|
2022-08-11 01:15:09 +08:00
|
|
|
int (*suspend)(struct vdpa_device *vdev);
|
2021-03-16 00:34:44 +08:00
|
|
|
size_t (*get_config_size)(struct vdpa_device *vdev);
|
2020-03-26 22:01:21 +08:00
|
|
|
void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
|
|
|
|
void *buf, unsigned int len);
|
|
|
|
void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
|
|
|
|
const void *buf, unsigned int len);
|
|
|
|
u32 (*get_generation)(struct vdpa_device *vdev);
|
2020-10-23 17:00:41 +08:00
|
|
|
struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
|
2020-03-26 22:01:21 +08:00
|
|
|
|
|
|
|
/* DMA ops */
|
2022-03-31 02:03:46 +08:00
|
|
|
int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
|
|
|
|
struct vhost_iotlb *iotlb);
|
|
|
|
int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
|
|
|
|
u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
|
|
|
|
int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
|
|
|
|
u64 iova, u64 size);
|
2022-03-31 02:03:47 +08:00
|
|
|
int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
|
|
|
|
unsigned int asid);
|
2020-03-26 22:01:21 +08:00
|
|
|
|
|
|
|
/* Free device resources */
|
|
|
|
void (*free)(struct vdpa_device *vdev);
|
|
|
|
};
|
|
|
|
|
|
|
|
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
|
|
|
|
const struct vdpa_config_ops *config,
|
2022-03-31 02:03:46 +08:00
|
|
|
unsigned int ngroups, unsigned int nas,
|
2021-08-31 18:36:31 +08:00
|
|
|
size_t size, const char *name,
|
|
|
|
bool use_va);
|
2020-03-26 22:01:21 +08:00
|
|
|
|
2021-07-15 16:00:26 +08:00
|
|
|
/**
|
|
|
|
* vdpa_alloc_device - allocate and initilaize a vDPA device
|
|
|
|
*
|
|
|
|
* @dev_struct: the type of the parent structure
|
|
|
|
* @member: the name of struct vdpa_device within the @dev_struct
|
|
|
|
* @parent: the parent device
|
|
|
|
* @config: the bus operations that is supported by this device
|
2022-03-31 02:03:45 +08:00
|
|
|
* @ngroups: the number of virtqueue groups supported by this device
|
2022-03-31 02:03:46 +08:00
|
|
|
* @nas: the number of address spaces
|
2021-07-15 16:00:26 +08:00
|
|
|
* @name: name of the vdpa device
|
2021-08-31 18:36:31 +08:00
|
|
|
* @use_va: indicate whether virtual address must be used by this device
|
2021-07-15 16:00:26 +08:00
|
|
|
*
|
|
|
|
* Return allocated data structure or ERR_PTR upon error
|
|
|
|
*/
|
2022-03-31 02:03:46 +08:00
|
|
|
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
|
|
|
|
name, use_va) \
|
2022-03-31 02:03:45 +08:00
|
|
|
container_of((__vdpa_alloc_device( \
|
2022-03-31 02:03:46 +08:00
|
|
|
parent, config, ngroups, nas, \
|
|
|
|
(sizeof(dev_struct) + \
|
2020-03-26 22:01:21 +08:00
|
|
|
BUILD_BUG_ON_ZERO(offsetof( \
|
2022-03-31 02:03:46 +08:00
|
|
|
dev_struct, member))), name, use_va)), \
|
2020-03-26 22:01:21 +08:00
|
|
|
dev_struct, member)
|
|
|
|
|
2022-03-15 11:25:52 +08:00
|
|
|
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
|
2020-03-26 22:01:21 +08:00
|
|
|
void vdpa_unregister_device(struct vdpa_device *vdev);
|
|
|
|
|
2022-03-15 11:25:52 +08:00
|
|
|
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
|
2021-01-05 18:32:01 +08:00
|
|
|
void _vdpa_unregister_device(struct vdpa_device *vdev);
|
|
|
|
|
2020-03-26 22:01:21 +08:00
|
|
|
/**
|
2021-04-07 01:04:44 +08:00
|
|
|
* struct vdpa_driver - operations for a vDPA driver
|
2020-03-26 22:01:21 +08:00
|
|
|
* @driver: underlying device driver
|
|
|
|
* @probe: the function to call when a device is found. Returns 0 or -errno.
|
|
|
|
* @remove: the function to call when a device is removed.
|
|
|
|
*/
|
|
|
|
struct vdpa_driver {
|
|
|
|
struct device_driver driver;
|
|
|
|
int (*probe)(struct vdpa_device *vdev);
|
|
|
|
void (*remove)(struct vdpa_device *vdev);
|
|
|
|
};
|
|
|
|
|
|
|
|
#define vdpa_register_driver(drv) \
|
|
|
|
__vdpa_register_driver(drv, THIS_MODULE)
|
|
|
|
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
|
|
|
|
void vdpa_unregister_driver(struct vdpa_driver *drv);
|
|
|
|
|
|
|
|
#define module_vdpa_driver(__vdpa_driver) \
|
|
|
|
module_driver(__vdpa_driver, vdpa_register_driver, \
|
|
|
|
vdpa_unregister_driver)
|
|
|
|
|
|
|
|
static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
|
|
|
|
{
|
|
|
|
return container_of(driver, struct vdpa_driver, driver);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
|
|
|
|
{
|
|
|
|
return container_of(_dev, struct vdpa_device, dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
|
|
|
|
{
|
|
|
|
return dev_get_drvdata(&vdev->dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
|
|
|
|
{
|
|
|
|
dev_set_drvdata(&vdev->dev, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
|
|
|
|
{
|
|
|
|
return vdev->dma_dev;
|
|
|
|
}
|
2020-07-27 22:51:55 +08:00
|
|
|
|
2021-08-31 18:36:26 +08:00
|
|
|
static inline int vdpa_reset(struct vdpa_device *vdev)
|
2020-07-27 22:51:55 +08:00
|
|
|
{
|
2021-08-31 18:36:25 +08:00
|
|
|
const struct vdpa_config_ops *ops = vdev->config;
|
2022-01-05 19:46:37 +08:00
|
|
|
int ret;
|
2020-07-27 22:51:55 +08:00
|
|
|
|
2022-05-18 21:38:02 +08:00
|
|
|
down_write(&vdev->cf_lock);
|
2020-07-27 22:51:55 +08:00
|
|
|
vdev->features_valid = false;
|
2022-01-05 19:46:37 +08:00
|
|
|
ret = ops->reset(vdev);
|
2022-05-18 21:38:02 +08:00
|
|
|
up_write(&vdev->cf_lock);
|
2022-01-05 19:46:37 +08:00
|
|
|
return ret;
|
2020-07-27 22:51:55 +08:00
|
|
|
}
|
|
|
|
|
2022-01-15 08:27:59 +08:00
|
|
|
static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
|
2020-07-27 22:51:55 +08:00
|
|
|
{
|
2021-08-31 18:36:25 +08:00
|
|
|
const struct vdpa_config_ops *ops = vdev->config;
|
2022-01-05 19:46:37 +08:00
|
|
|
int ret;
|
|
|
|
|
2020-07-27 22:51:55 +08:00
|
|
|
vdev->features_valid = true;
|
2022-01-05 19:46:37 +08:00
|
|
|
ret = ops->set_driver_features(vdev, features);
|
2022-01-15 08:27:59 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2022-05-18 21:38:02 +08:00
|
|
|
down_write(&vdev->cf_lock);
|
2022-01-15 08:27:59 +08:00
|
|
|
ret = vdpa_set_features_unlocked(vdev, features);
|
2022-05-18 21:38:02 +08:00
|
|
|
up_write(&vdev->cf_lock);
|
2022-01-05 19:46:37 +08:00
|
|
|
|
|
|
|
return ret;
|
2020-07-27 22:51:55 +08:00
|
|
|
}
|
|
|
|
|
2021-10-27 01:55:12 +08:00
|
|
|
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
|
|
|
|
void *buf, unsigned int len);
|
|
|
|
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
|
|
|
|
const void *buf, unsigned int length);
|
2022-01-05 19:46:35 +08:00
|
|
|
void vdpa_set_status(struct vdpa_device *vdev, u8 status);
|
|
|
|
|
2021-01-05 18:32:00 +08:00
|
|
|
/**
|
2021-04-07 01:04:44 +08:00
|
|
|
* struct vdpa_mgmtdev_ops - vdpa device ops
|
|
|
|
* @dev_add: Add a vdpa device using alloc and register
|
|
|
|
* @mdev: parent device to use for device addition
|
|
|
|
* @name: name of the new vdpa device
|
2021-10-27 01:55:15 +08:00
|
|
|
* @config: config attributes to apply to the device under creation
|
2021-04-07 01:04:44 +08:00
|
|
|
* Driver need to add a new device using _vdpa_register_device()
|
|
|
|
* after fully initializing the vdpa device. Driver must return 0
|
|
|
|
* on success or appropriate error code.
|
|
|
|
* @dev_del: Remove a vdpa device using unregister
|
|
|
|
* @mdev: parent device to use for device removal
|
|
|
|
* @dev: vdpa device to remove
|
|
|
|
* Driver need to remove the specified device by calling
|
|
|
|
* _vdpa_unregister_device().
|
2021-01-05 18:32:00 +08:00
|
|
|
*/
|
|
|
|
struct vdpa_mgmtdev_ops {
|
2021-10-27 01:55:15 +08:00
|
|
|
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
|
|
|
|
const struct vdpa_dev_set_config *config);
|
2021-01-05 18:32:00 +08:00
|
|
|
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
|
|
|
|
};
|
|
|
|
|
2021-10-27 01:55:14 +08:00
|
|
|
/**
|
|
|
|
* struct vdpa_mgmt_dev - vdpa management device
|
|
|
|
* @device: Management parent device
|
|
|
|
* @ops: operations supported by management device
|
|
|
|
* @id_table: Pointer to device id table of supported ids
|
2021-10-27 01:55:15 +08:00
|
|
|
* @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
|
|
|
|
* management device support during dev_add callback
|
2021-10-27 01:55:14 +08:00
|
|
|
* @list: list entry
|
|
|
|
*/
|
2021-01-05 18:32:00 +08:00
|
|
|
struct vdpa_mgmt_dev {
|
|
|
|
struct device *device;
|
|
|
|
const struct vdpa_mgmtdev_ops *ops;
|
2022-04-29 17:10:30 +08:00
|
|
|
struct virtio_device_id *id_table;
|
2021-10-27 01:55:15 +08:00
|
|
|
u64 config_attr_mask;
|
2021-01-05 18:32:00 +08:00
|
|
|
struct list_head list;
|
vdpa: Support reporting max device capabilities
Add max_supported_vqs and supported_features fields to struct
vdpa_mgmt_dev. Upstream drivers need to feel these values according to
the device capabilities.
These values are reported back in a netlink message when showing management
devices.
Examples:
$ auxiliary/mlx5_core.sf.1:
supported_classes net
max_supported_vqs 257
dev_features CSUM GUEST_CSUM MTU HOST_TSO4 HOST_TSO6 STATUS CTRL_VQ MQ \
CTRL_MAC_ADDR VERSION_1 ACCESS_PLATFORM
$ vdpa -j mgmtdev show
{"mgmtdev":{"auxiliary/mlx5_core.sf.1":{"supported_classes":["net"], \
"max_supported_vqs":257,"dev_features":["CSUM","GUEST_CSUM","MTU", \
"HOST_TSO4","HOST_TSO6","STATUS","CTRL_VQ","MQ","CTRL_MAC_ADDR", \
"VERSION_1","ACCESS_PLATFORM"]}}}
$ vdpa -jp mgmtdev show
{
"mgmtdev": {
"auxiliary/mlx5_core.sf.1": {
"supported_classes": [ "net" ],
"max_supported_vqs": 257,
"dev_features": ["CSUM","GUEST_CSUM","MTU","HOST_TSO4", \
"HOST_TSO6","STATUS","CTRL_VQ","MQ", \
"CTRL_MAC_ADDR","VERSION_1","ACCESS_PLATFORM"]
}
}
}
Signed-off-by: Eli Cohen <elic@nvidia.com>
Link: https://lore.kernel.org/r/20220105114646.577224-11-elic@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Si-Wei Liu<si-wei.liu@oracle.com>
2022-01-05 19:46:42 +08:00
|
|
|
u64 supported_features;
|
|
|
|
u32 max_supported_vqs;
|
2021-01-05 18:32:00 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
|
|
|
|
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
|
|
|
|
|
2020-03-26 22:01:21 +08:00
|
|
|
#endif /* _LINUX_VDPA_H */
|