xen: branch for v5.6-rc5

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCXmNp4AAKCRCAXGG7T9hj
 vmPeAP42nekgUNbUzEuei1/v4bJoepxIg22UXTVnjWwx9JVQKgEA+fgswmyy4NN2
 Ab7ty2zw1s3Vwhoq909lWNIJdz/+1wI=
 =C3CJ
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-5.6b-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Four fixes and a small cleanup patch:

   - two fixes by Dongli Zhang fixing races in the xenbus driver

   - two fixes by me fixing issues introduced in 5.6

   - a small cleanup by Gustavo Silva replacing a zero-length array with
     a flexible-array"

* tag 'for-linus-5.6b-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/blkfront: fix ring info addressing
  xen/xenbus: fix locking
  xenbus: req->err should be updated before req->state
  xenbus: req->body should be updated before req->state
  xen: Replace zero-length array with flexible-array member
Linus Torvalds 2020-03-07 08:04:54 -06:00
commit cbee7c8b44
8 changed files with 64 additions and 51 deletions

View File

@@ -213,6 +213,7 @@ struct blkfront_info
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
+ unsigned int rinfo_size;
/* Save uncomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
@@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
+ #define for_each_rinfo(info, ptr, idx) \
+ for ((ptr) = (info)->rinfo, (idx) = 0; \
+ (idx) < (info)->nr_rings; \
+ (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
+ static inline struct blkfront_ring_info *
+ get_rinfo(const struct blkfront_info *info, unsigned int i)
+ {
+ BUG_ON(i >= info->nr_rings);
+ return (void *)info->rinfo + i * info->rinfo_size;
+ }
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
unsigned long free = rinfo->shadow_free;
@@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
struct blkfront_info *info = hctx->queue->queuedata;
struct blkfront_ring_info *rinfo = NULL;
- BUG_ON(info->nr_rings <= qid);
- rinfo = &info->rinfo[qid];
+ rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
@@ -1181,6 +1193,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
unsigned int minor, nr_minors, i;
+ struct blkfront_ring_info *rinfo;
if (info->rq == NULL)
return;
@@ -1188,9 +1201,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&rinfo->callback);
@@ -1339,6 +1350,7 @@ free_shadow:
static void blkif_free(struct blkfront_info *info, int suspend)
{
unsigned int i;
+ struct blkfront_ring_info *rinfo;
/* Prevent new requests being issued until we fix things up. */
info->connected = suspend ?
@@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
if (info->rq)
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++)
- blkif_free_ring(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ blkif_free_ring(rinfo);
kvfree(info->rinfo);
info->rinfo = NULL;
@@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
int err;
unsigned int i, max_page_order;
unsigned int ring_page_order;
+ struct blkfront_ring_info *rinfo;
if (!info)
return -ENODEV;
@@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
if (err)
goto destroy_blkring;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
/* Create shared ring, alloc event channel. */
err = setup_blkring(dev, rinfo);
if (err)
@@ -1815,7 +1826,7 @@ again:
/* We already got the number of queues/rings in _probe */
if (info->nr_rings == 1) {
- err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
+ err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
if (err)
goto destroy_blkring;
} else {
@@ -1837,10 +1848,10 @@ again:
goto abort_transaction;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
memset(path, 0, pathsize);
snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
- err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
+ err = write_per_ring_nodes(xbt, rinfo, path);
if (err) {
kfree(path);
goto destroy_blkring;
@@ -1868,9 +1879,8 @@ again:
goto destroy_blkring;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
unsigned int j;
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
for (j = 0; j < BLK_RING_SIZE(info); j++)
rinfo->shadow[j].req.u.rw.id = j + 1;
@@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info)
{
unsigned int backend_max_queues;
unsigned int i;
+ struct blkfront_ring_info *rinfo;
BUG_ON(info->nr_rings);
@@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info)
if (!info->nr_rings)
info->nr_rings = 1;
- info->rinfo = kvcalloc(info->nr_rings,
- struct_size(info->rinfo, shadow,
- BLK_RING_SIZE(info)),
- GFP_KERNEL);
+ info->rinfo_size = struct_size(info->rinfo, shadow,
+ BLK_RING_SIZE(info));
+ info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
info->nr_rings = 0;
return -ENOMEM;
}
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo;
- rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
INIT_LIST_HEAD(&rinfo->indirect_pages);
INIT_LIST_HEAD(&rinfo->grants);
rinfo->dev_info = info;
@@ -2017,6 +2024,7 @@ static int blkif_recover(struct blkfront_info *info)
int rc;
struct bio *bio;
unsigned int segs;
+ struct blkfront_ring_info *rinfo;
blkfront_gather_backend_features(info);
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2024,9 +2032,7 @@ static int blkif_recover(struct blkfront_info *info)
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
+ for_each_rinfo(info, rinfo, r_index) {
rc = blkfront_setup_indirect(rinfo);
if (rc)
return rc;
@@ -2036,10 +2042,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
- rinfo = &info->rinfo[r_index];
+ for_each_rinfo(info, rinfo, r_index) {
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(rinfo);
}
@@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev)
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
unsigned int i, j;
+ struct blkfront_ring_info *rinfo;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
bio_list_init(&info->bio_list);
INIT_LIST_HEAD(&info->requests);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct bio_list merge_bio;
struct blk_shadow *shadow = rinfo->shadow;
@@ -2337,6 +2340,7 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned int binfo;
char *envp[] = { "RESIZE=1", NULL };
int err, i;
+ struct blkfront_ring_info *rinfo;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -2394,8 +2398,8 @@ static void blkfront_connect(struct blkfront_info *info)
"physical-sector-size",
sector_size);
blkfront_gather_backend_features(info);
- for (i = 0; i < info->nr_rings; i++) {
- err = blkfront_setup_indirect(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i) {
+ err = blkfront_setup_indirect(rinfo);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
@@ -2416,8 +2420,8 @@ static void blkfront_connect(struct blkfront_info *info)
/* Kick pending requests. */
info->connected = BLKIF_STATE_CONNECTED;
- for (i = 0; i < info->nr_rings; i++)
- kick_pending_request_queues(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ kick_pending_request_queues(rinfo);
device_add_disk(&info->xbdev->dev, info->gd, NULL);
@@ -2652,9 +2656,9 @@ static void purge_persistent_grants(struct blkfront_info *info)
{
unsigned int i;
unsigned long flags;
+ struct blkfront_ring_info *rinfo;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct grant *gnt_list_entry, *tmp;
spin_lock_irqsave(&rinfo->ring_lock, flags);
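
The addressing fix above exists because struct blkfront_ring_info now ends in a flexible shadow[] array: consecutive per-ring structures are rinfo_size bytes apart rather than sizeof(struct blkfront_ring_info) apart, so plain indexing like &info->rinfo[i] points into the wrong ring for i > 0. A minimal standalone sketch of the byte-offset addressing that get_rinfo()/for_each_rinfo() perform (simplified, hypothetical types; not the driver code itself):

#include <stdio.h>
#include <stdlib.h>

struct shadow { unsigned long id; };

/* Stand-in for blkfront_ring_info: a fixed header plus a flexible array. */
struct ring {
    unsigned int index;
    struct shadow shadow[];
};

struct dev_info {
    struct ring *rinfo;       /* one variable-sized element per ring */
    unsigned int nr_rings;
    size_t rinfo_size;        /* sizeof(struct ring) + shadow entries */
};

/* Byte-offset addressing, the same idea as the driver's get_rinfo(). */
static struct ring *get_ring(const struct dev_info *info, unsigned int i)
{
    return (struct ring *)((char *)info->rinfo + i * info->rinfo_size);
}

int main(void)
{
    struct dev_info info;
    unsigned int i, shadow_entries = 32;

    info.nr_rings = 4;
    info.rinfo_size = sizeof(struct ring) +
                      shadow_entries * sizeof(struct shadow);
    info.rinfo = calloc(info.nr_rings, info.rinfo_size);
    if (!info.rinfo)
        return 1;

    for (i = 0; i < info.nr_rings; i++)
        get_ring(&info, i)->index = i;

    /* &info.rinfo[i] would advance by sizeof(struct ring) only and would
     * land inside ring 0's shadow array for every i > 0. */
    for (i = 0; i < info.nr_rings; i++)
        printf("ring %u starts %zu bytes into the allocation\n",
               get_ring(&info, i)->index, i * info.rinfo_size);

    free(info.rinfo);
    return 0;
}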

View File

@@ -52,7 +52,7 @@ struct xen_pcibk_dev_data {
unsigned int ack_intr:1; /* .. and ACK-ing */
unsigned long handled;
unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
- char irq_name[0]; /* xen-pcibk[000:04:00.0] */
+ char irq_name[]; /* xen-pcibk[000:04:00.0] */
};
/* Used by XenBus and xen_pcibk_ops.c */
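
The [0] -> [] change above is declaration-only; structs like this are still allocated by sizing the buffer for the fixed part plus the trailing data. A small user-space sketch of that allocation pattern (hypothetical dev_data_alloc(), plain malloc() standing in for the kernel allocators and their struct_size() helper; not the pciback code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev_data {
    unsigned int irq;
    char irq_name[];          /* flexible array member, was irq_name[0] */
};

static struct dev_data *dev_data_alloc(unsigned int irq, const char *name)
{
    /* Room for the fixed part plus the trailing, variably sized string. */
    struct dev_data *d = malloc(sizeof(*d) + strlen(name) + 1);

    if (!d)
        return NULL;
    d->irq = irq;
    strcpy(d->irq_name, name);
    return d;
}

int main(void)
{
    struct dev_data *d = dev_data_alloc(9, "xen-pcibk[0000:04:00.0]");

    if (!d)
        return 1;
    printf("%s -> irq %u\n", d->irq_name, d->irq);
    free(d);
    return 0;
}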

View File

@@ -313,6 +313,8 @@ static int process_msg(void)
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;
req->body = state.body;
+ /* write body, then update state */
+ virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
} else
@@ -395,6 +397,8 @@ static int process_writes(void)
if (state.req->state == xb_req_state_aborted)
kfree(state.req);
else {
+ /* write err, then update state */
+ virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}

View File

@@ -239,9 +239,9 @@ int xenbus_dev_probe(struct device *_dev)
goto fail;
}
- spin_lock(&dev->reclaim_lock);
+ down(&dev->reclaim_sem);
err = drv->probe(dev, id);
- spin_unlock(&dev->reclaim_lock);
+ up(&dev->reclaim_sem);
if (err)
goto fail_put;
@@ -271,9 +271,9 @@ int xenbus_dev_remove(struct device *_dev)
free_otherend_watch(dev);
if (drv->remove) {
- spin_lock(&dev->reclaim_lock);
+ down(&dev->reclaim_sem);
drv->remove(dev);
- spin_unlock(&dev->reclaim_lock);
+ up(&dev->reclaim_sem);
}
module_put(drv->driver.owner);
@@ -473,7 +473,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
goto fail;
dev_set_name(&xendev->dev, "%s", devname);
- spin_lock_init(&xendev->reclaim_lock);
+ sema_init(&xendev->reclaim_sem, 1);
/* Register with generic device framework. */
err = device_register(&xendev->dev);

View File

@@ -45,6 +45,7 @@
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/export.h>
+ #include <linux/semaphore.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -257,10 +258,10 @@ static int backend_reclaim_memory(struct device *dev, void *data)
drv = to_xenbus_driver(dev->driver);
if (drv && drv->reclaim_memory) {
xdev = to_xenbus_device(dev);
- if (!spin_trylock(&xdev->reclaim_lock))
+ if (down_trylock(&xdev->reclaim_sem))
return 0;
drv->reclaim_memory(xdev);
- spin_unlock(&xdev->reclaim_lock);
+ up(&xdev->reclaim_sem);
}
return 0;
}
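
The locking change in this file and in xenbus_probe.c replaces the reclaim_lock spinlock with a semaphore because the callbacks it serializes (drv->probe() and drv->remove()) may sleep, which is not allowed while holding a spinlock; the reclaim path above only try-acquires it so memory reclaim can back off instead of blocking. A user-space analogue of the pattern with POSIX semaphores, sem_trywait() standing in for down_trylock() (not the kernel code):

#include <semaphore.h>
#include <stdio.h>

static sem_t reclaim_sem;

static void dev_probe(void)
{
    sem_wait(&reclaim_sem);           /* like down(&dev->reclaim_sem) */
    puts("probing: may sleep, allocate, etc.");
    sem_post(&reclaim_sem);           /* like up(&dev->reclaim_sem) */
}

static void reclaim_memory(void)
{
    /* down_trylock() returns nonzero on failure; sem_trywait() returns -1. */
    if (sem_trywait(&reclaim_sem) != 0) {
        puts("device busy, skipping reclaim");
        return;
    }
    puts("reclaiming cached resources");
    sem_post(&reclaim_sem);
}

int main(void)
{
    sem_init(&reclaim_sem, 0, 1);     /* binary semaphore, like sema_init(sem, 1) */
    dev_probe();
    reclaim_memory();
    sem_destroy(&reclaim_sem);
    return 0;
}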

View File

@@ -191,8 +191,11 @@ static bool xenbus_ok(void)
static bool test_reply(struct xb_req_data *req)
{
- if (req->state == xb_req_state_got_reply || !xenbus_ok())
+ if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
+ /* read req->state before all other fields */
+ virt_rmb();
return true;
+ }
/* Make sure to reread req->state each time. */
barrier();
@@ -202,7 +205,7 @@ static bool test_reply(struct xb_req_data *req)
static void *read_reply(struct xb_req_data *req)
{
- while (req->state != xb_req_state_got_reply) {
+ do {
wait_event(req->wq, test_reply(req));
if (!xenbus_ok())
@@ -216,7 +219,7 @@ static void *read_reply(struct xb_req_data *req)
if (req->err)
return ERR_PTR(req->err);
- }
+ } while (req->state != xb_req_state_got_reply);
return req->body;
}
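
The two race fixes pair up across files: the writers in xenbus_comms.c publish req->body or req->err and issue virt_wmb() before setting req->state, while the reader in test_reply() above issues virt_rmb() after seeing the state change and before the caller touches the other fields (and read_reply() now rechecks the state via a do/while). A user-space analogue of that ordering, with C11 fences standing in for virt_wmb()/virt_rmb() (not the xenbus code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct reply {
    int err;
    const char *body;
    atomic_int state;                 /* 0 = pending, 1 = got_reply */
};

static struct reply req = { .err = 0, .body = NULL, .state = 0 };

static void *producer(void *arg)
{
    (void)arg;
    req.err = 0;
    req.body = "reply payload";
    /* Publish body/err before the state flip (virt_wmb() in the fix). */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&req.state, 1, memory_order_relaxed);
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    while (atomic_load_explicit(&req.state, memory_order_relaxed) != 1)
        ;                             /* spin until the reply is published */
    /* Read state before all other fields (virt_rmb() in the fix). */
    atomic_thread_fence(memory_order_acquire);
    printf("err=%d body=%s\n", req.err, req.body);
    return NULL;
}

int main(void)
{
    pthread_t p, c;

    pthread_create(&c, NULL, consumer, NULL);
    pthread_create(&p, NULL, producer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}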

View File

@@ -46,7 +46,7 @@ struct vtpm_shared_page {
uint8_t pad;
uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
- uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */
+ uint32_t extra_pages[]; /* grant IDs; length in nr_extra_pages */
};
#endif

View File

@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
+ #include <linux/semaphore.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/xenbus.h>
@@ -76,7 +77,7 @@ struct xenbus_device {
enum xenbus_state state;
struct completion down;
struct work_struct work;
- spinlock_t reclaim_lock;
+ struct semaphore reclaim_sem;
};
static inline struct xenbus_device *to_xenbus_device(struct device *dev)