um: virtio: Implement VHOST_USER_PROTOCOL_F_REPLY_ACK

Implement the VHOST_USER_PROTOCOL_F_REPLY_ACK extension in both
directions: for slave requests (added in the previous patch), where
we now have to send a reply, and for our own requests, where the
acknowledgement lets us detect whether the slave failed.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Author:    Johannes Berg <johannes.berg@intel.com>
Date:      2019-09-11 14:51:22 +02:00
Committer: Richard Weinberger <richard@nod.at>
Commit:    27eca5c474 (parent 2cd097ba8c)
2 files changed, 84 insertions(+), 18 deletions(-)
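
Note on the mechanism (not part of the patch itself): with REPLY_ACK
negotiated, the master may set VHOST_USER_FLAG_NEED_REPLY in a request
header; the slave then answers with a u64 payload where 0 means success
and non-zero reports a failure. The standalone C sketch below only
illustrates the decision the reworked vhost_user_send() makes about
when to request such an ACK; the want_reply_ack() helper and main()
are made up for illustration.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* protocol feature bit and header flag, as in the vhost-user spec */
  #define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
  #define VHOST_USER_FLAG_NEED_REPLY      (1u << 3)

  /*
   * Request an ACK only if the slave negotiated REPLY_ACK and the
   * message does not already expect a response payload (e.g.
   * GET_FEATURES); in that case an extra ACK would be meaningless.
   */
  static bool want_reply_ack(uint64_t protocol_features, bool need_response)
  {
      if (need_response)
          return false;
      return protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);
  }

  int main(void)
  {
      uint64_t negotiated = 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK;
      uint32_t flags = 0;

      if (want_reply_ack(negotiated, false))
          flags |= VHOST_USER_FLAG_NEED_REPLY;

      /* the slave would then reply with a u64 status: 0 = success */
      printf("header flags: 0x%x\n", flags);
      return 0;
  }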

--- a/arch/um/drivers/vhost_user.h
+++ b/arch/um/drivers/vhost_user.h

@@ -6,9 +6,11 @@
 /* Message flags */
 #define VHOST_USER_FLAG_REPLY		BIT(2)
+#define VHOST_USER_FLAG_NEED_REPLY	BIT(3)
 /* Feature bits */
 #define VHOST_USER_F_PROTOCOL_FEATURES	30
 /* Protocol feature bits */
+#define VHOST_USER_PROTOCOL_F_REPLY_ACK	3
 #define VHOST_USER_PROTOCOL_F_SLAVE_REQ	5
 #define VHOST_USER_PROTOCOL_F_CONFIG	9
 
 /* Vring state index masks */
@@ -20,7 +22,8 @@
 /* Supported transport features */
 #define VHOST_USER_SUPPORTED_F BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)
 /* Supported protocol features */
-#define VHOST_USER_SUPPORTED_PROTOCOL_F (BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
+#define VHOST_USER_SUPPORTED_PROTOCOL_F (BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
+					 BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
 					 BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG))
 
 enum vhost_user_request {

--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c

@@ -160,30 +160,64 @@ static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
 	if (rc)
 		return rc;
 
-	if (msg->header.flags != VHOST_USER_VERSION)
+	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
+			VHOST_USER_VERSION)
 		return -EPROTO;
 
 	return 0;
 }
 
 static int vhost_user_send(struct virtio_uml_device *vu_dev,
-			   struct vhost_user_msg *msg,
+			   bool need_response, struct vhost_user_msg *msg,
 			   int *fds, size_t num_fds)
 {
 	size_t size = sizeof(msg->header) + msg->header.size;
+	bool request_ack;
+	int rc;
 
 	msg->header.flags |= VHOST_USER_VERSION;
-	return full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
+
+	/*
+	 * The need_response flag indicates that we already need a response,
+	 * e.g. to read the features. In these cases, don't request an ACK as
+	 * it is meaningless. Also request an ACK only if supported.
+	 */
+	request_ack = !need_response;
+	if (!(vu_dev->protocol_features &
+			BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
+		request_ack = false;
+
+	if (request_ack)
+		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;
+
+	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
+	if (rc < 0)
+		return rc;
+
+	if (request_ack) {
+		uint64_t status;
+
+		rc = vhost_user_recv_u64(vu_dev, &status);
+		if (rc)
+			return rc;
+
+		if (status) {
+			vu_err(vu_dev, "slave reports error: %llu\n", status);
+			return -EIO;
+		}
+	}
+
+	return 0;
 }
 
 static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
-				      u32 request)
+				      bool need_response, u32 request)
 {
 	struct vhost_user_msg msg = {
 		.header.request = request,
 	};
 
-	return vhost_user_send(vu_dev, &msg, NULL, 0);
+	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
 }
 
 static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
@@ -193,7 +227,7 @@ static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
 		.header.request = request,
 	};
 
-	return vhost_user_send(vu_dev, &msg, &fd, 1);
+	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
 }
 
 static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
@@ -205,18 +239,19 @@ static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
 		.payload.integer = value,
 	};
 
-	return vhost_user_send(vu_dev, &msg, NULL, 0);
+	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
 }
 
 static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
 {
-	return vhost_user_send_no_payload(vu_dev, VHOST_USER_SET_OWNER);
+	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
 }
 
 static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
 				   u64 *features)
 {
-	int rc = vhost_user_send_no_payload(vu_dev, VHOST_USER_GET_FEATURES);
+	int rc = vhost_user_send_no_payload(vu_dev, true,
+					    VHOST_USER_GET_FEATURES);
 
 	if (rc)
 		return rc;
@@ -232,7 +267,7 @@ static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
 static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
 					    u64 *protocol_features)
 {
-	int rc = vhost_user_send_no_payload(vu_dev,
+	int rc = vhost_user_send_no_payload(vu_dev, true,
 					    VHOST_USER_GET_PROTOCOL_FEATURES);
 
 	if (rc)
@@ -247,9 +282,32 @@ static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
 					     protocol_features);
 }
 
+static void vhost_user_reply(struct virtio_uml_device *vu_dev,
+			     struct vhost_user_msg *msg, int response)
+{
+	struct vhost_user_msg reply = {
+		.payload.integer = response,
+	};
+	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
+	int rc;
+
+	reply.header = msg->header;
+	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
+	reply.header.flags |= VHOST_USER_FLAG_REPLY;
+	reply.header.size = sizeof(reply.payload.integer);
+
+	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
+	if (rc)
+		vu_err(vu_dev,
+		       "sending reply to slave request failed: %d (size %zu)\n",
+		       rc, size);
+}
+
 static irqreturn_t vu_req_interrupt(int irq, void *data)
 {
 	struct virtio_uml_device *vu_dev = data;
+	int response = 1;
 	struct {
 		struct vhost_user_msg msg;
 		u8 extra_payload[512];
@@ -266,6 +324,7 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
 	switch (msg.msg.header.request) {
 	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
 		virtio_config_changed(&vu_dev->vdev);
+		response = 0;
 		break;
 	case VHOST_USER_SLAVE_IOTLB_MSG:
 		/* not supported - VIRTIO_F_IOMMU_PLATFORM */
@@ -276,6 +335,9 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
 			msg.msg.header.request);
 	}
 
+	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
+		vhost_user_reply(vu_dev, &msg.msg, response);
+
 	return IRQ_HANDLED;
 }
 
@@ -365,7 +427,7 @@ static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
 	msg->payload.config.offset = 0;
 	msg->payload.config.size = cfg_size;
 
-	rc = vhost_user_send(vu_dev, msg, NULL, 0);
+	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
 	if (rc) {
 		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
 		       rc);
@@ -416,7 +478,7 @@ static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
 	msg->payload.config.size = len;
 	memcpy(msg->payload.config.payload, buf, len);
 
-	rc = vhost_user_send(vu_dev, msg, NULL, 0);
+	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
 	if (rc)
 		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
 		       rc);
@@ -506,7 +568,8 @@ static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
 			return rc;
 	}
 
-	return vhost_user_send(vu_dev, &msg, fds, msg.payload.mem_regions.num);
+	return vhost_user_send(vu_dev, false, &msg, fds,
+			       msg.payload.mem_regions.num);
 }
 
 static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
@@ -519,7 +582,7 @@ static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
 		.payload.vring_state.num = num,
 	};
 
-	return vhost_user_send(vu_dev, &msg, NULL, 0);
+	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
 }
 
 static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
@@ -550,7 +613,7 @@ static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
 		.payload.vring_addr.log = log,
 	};
 
-	return vhost_user_send(vu_dev, &msg, NULL, 0);
+	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
 }
 
 static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
@@ -566,9 +629,9 @@ static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
 		return -EINVAL;
 	if (fd < 0) {
 		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
-		return vhost_user_send(vu_dev, &msg, NULL, 0);
+		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
 	}
-	return vhost_user_send(vu_dev, &msg, &fd, 1);
+	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
 }
 
 static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,