Xen bug fixes for 4.5-rc5
- Two scsiback fixes (resource leak and spurious warning).
- Fix DMA mapping of compound pages on arm/arm64.
- Fix some pciback regressions in MSI-X handling.
- Fix a pcifront crash due to some uninitialized state.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJWyvatAAoJEFxbo/MsZsTRBFcH+wWnv0/N+gKib3cKCI4lwmTg
n8iVgf8dNWwD36M2s/OlzCAglAIt8Xr6ySNvPqTerpm7lT9yXlIVQxGXTbIGuTAA
h8Kt8WiC0BNLHHlLxBuCz62KR47DvMhsr84lFURE8FmpUiulFjXmRcbrZkHIMYRS
l/X+xJWO1vxwrSYho0P9n3ksTWHm488DTPvZz3ICNI2G2sndDfbT3gv3tMDaQhcX
ZaQR93vtIoldqk29Ga59vaVtksbgxHZIbasY9PQ8rqOxHJpDQbPzpjocoLxAzf50
cioQVyKQ7i9vUvZ+B3TTAOhxisA2hDwNhLGQzmjgxe2TXeKdo3yjYwO6m1dDBzY=
=VY/S
-----END PGP SIGNATURE-----

Merge tag 'for-linus-4.5-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - Two scsiback fixes (resource leak and spurious warning).

 - Fix DMA mapping of compound pages on arm/arm64.

 - Fix some pciback regressions in MSI-X handling.

 - Fix a pcifront crash due to some uninitialized state.

* tag 'for-linus-4.5-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pcifront: Fix mysterious crashes when NUMA locality information was extracted.
  xen/pcifront: Report the errors better.
  xen/pciback: Save the number of MSI-X entries to be copied later.
  xen/pciback: Check PF instead of VF for PCI_COMMAND_MEMORY
  xen: fix potential integer overflow in queue_reply
  xen/arm: correctly handle DMA mapping of compound pages
  xen/scsiback: avoid warnings when adding multiple LUNs to a domain
  xen/scsiback: correct frontend counting
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     dma_addr_t dev_addr, unsigned long offset, size_t size,
 	     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+	unsigned long page_pfn = page_to_xen_pfn(page);
+	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+	unsigned long compound_pages =
+		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+	bool local = (page_pfn <= dev_pfn) &&
+		(dev_pfn - page_pfn < compound_pages);
+
 	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. So if the first xen_pfn == mfn the page is local
-	 * otherwise it's a foreign page grant-mapped in dom0. If the page is
-	 * local we can safely call the native dma_ops function, otherwise we
-	 * call the xen specific function.
+	 * Dom0 is mapped 1:1, while the Linux page can span across
+	 * multiple Xen pages, it's not possible for it to contain a
+	 * mix of local and foreign Xen pages. So if the first xen_pfn
+	 * == mfn the page is local otherwise it's a foreign page
+	 * grant-mapped in dom0. If the page is local we can safely
+	 * call the native dma_ops function, otherwise we call the xen
+	 * specific function.
 	 */
 	if (local)
 		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
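For context on the hunk above: a Linux compound page of a given order covers (1 << order) * XEN_PFN_PER_PAGE consecutive Xen PFNs, so the new locality test is just a range check on the device address's Xen PFN. A minimal userspace sketch of that arithmetic (not kernel code; XEN_PFN_PER_PAGE is hard-coded here on the assumption of 64 KiB kernel pages over 4 KiB Xen pages):

#include <stdbool.h>
#include <stdio.h>

#define XEN_PFN_PER_PAGE 16UL	/* assumed: 64 KiB kernel page / 4 KiB Xen page */

/* Mirrors the check added above: local iff dev_pfn lies in
 * [page_pfn, page_pfn + compound_pages). */
static bool xen_pfn_is_local(unsigned long page_pfn, unsigned long dev_pfn,
			     unsigned int order)
{
	unsigned long compound_pages = (1UL << order) * XEN_PFN_PER_PAGE;

	return page_pfn <= dev_pfn && dev_pfn - page_pfn < compound_pages;
}

int main(void)
{
	/* An order-2 compound page at Xen PFN 1000 spans PFNs 1000..1063. */
	printf("%d\n", xen_pfn_is_local(1000, 1000, 2));	/* 1 */
	printf("%d\n", xen_pfn_is_local(1000, 1063, 2));	/* 1 */
	printf("%d\n", xen_pfn_is_local(1000, 1064, 2));	/* 0 */
	printf("%d\n", xen_pfn_is_local(1000,  999, 2));	/* 0 */
	return 0;
}

The old test compared only the first Xen PFN, so a dev_addr pointing into the tail of a local compound page was misclassified as foreign and sent down the Xen-specific path.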
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -57,7 +57,7 @@ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
 {
 	if (xen_pci_frontend && xen_pci_frontend->enable_msi)
 		return xen_pci_frontend->enable_msi(dev, vectors);
-	return -ENODEV;
+	return -ENOSYS;
 }
 static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
 {
@@ -69,7 +69,7 @@ static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
 {
 	if (xen_pci_frontend && xen_pci_frontend->enable_msix)
 		return xen_pci_frontend->enable_msix(dev, vectors, nvec);
-	return -ENODEV;
+	return -ENOSYS;
 }
 static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
 {
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -196,7 +196,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	return 0;
 
 error:
-	dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+	if (ret == -ENOSYS)
+		dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+	else if (ret)
+		dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
 free:
 	kfree(v);
 	return ret;
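The pci.h and xen.c hunks work together: the frontend stubs now return -ENOSYS ("not implemented") when no frontend ever registered, so the error path above can tell that apart from a genuine failure. A hedged userspace sketch of the same dispatch, with fprintf standing in for dev_err():

#include <errno.h>
#include <stdio.h>

/* Sketch only: mirrors the error: label above, not the kernel API. */
static void report_msi_error(int ret)
{
	if (ret == -ENOSYS)
		fprintf(stderr, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
	else if (ret)
		fprintf(stderr, "Xen PCI frontend error: %d!\n", ret);
}

int main(void)
{
	report_msi_error(-ENOSYS);	/* frontend absent: keep the old message */
	report_msi_error(-EINVAL);	/* real failure: report the code */
	return 0;
}

Previously every error, including real ones from a registered frontend, was reported as "has not registered MSI/MSI-X support", which made failures hard to diagnose.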
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
 };
 
 struct pcifront_sd {
-	int domain;
+	struct pci_sysdata sd;
 	struct pcifront_device *pdev;
 };
 
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
 				    unsigned int domain, unsigned int bus,
 				    struct pcifront_device *pdev)
 {
-	sd->domain = domain;
+	/* Because we do not expose that information via XenBus. */
+	sd->sd.node = first_online_node;
+	sd->sd.domain = domain;
 	sd->pdev = pdev;
 }
 
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
 	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
 		 domain, bus);
 
-	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+	bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
 	if (!bus_entry || !sd) {
 		err = -ENOMEM;
 		goto err_out;
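The kzalloc switch matters because pcifront_sd now embeds struct pci_sysdata, and fields of that struct (the NUMA node in particular) can be consulted before pcifront_init_sd has filled in every member; with kmalloc such reads see heap garbage. A userspace analogue, with an illustrative struct in place of pci_sysdata:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the embedded pci_sysdata. */
struct sysdata_like {
	int domain;
	int node;	/* consumers may read this before it is assigned */
};

int main(void)
{
	/* calloc plays the role of kzalloc: every field starts out zero,
	 * so a premature read of sd->node cannot return heap junk. */
	struct sysdata_like *sd = calloc(1, sizeof(*sd));

	if (!sd)
		return 1;
	printf("node=%d domain=%d\n", sd->node, sd->domain);
	free(sd);
	return 0;
}

With kmalloc, a garbage sd->node used as an index into NUMA tables is exactly the kind of "mysterious crash" the shortlog refers to.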
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
 	/*
 	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
 	 * to access the BARs where the MSI-X entries reside.
+	 * But VF devices are unique in which the PF needs to be checked.
 	 */
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
 	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
 		return -ENXIO;
 
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
 	struct xen_pcibk_dev_data *dev_data = NULL;
 	struct xen_pci_op *op = &pdev->op;
 	int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+	unsigned int nr = 0;
+#endif
 
 	*op = pdev->sh_info->op;
 	barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
 			op->err = xen_pcibk_disable_msi(pdev, dev, op);
 			break;
 		case XEN_PCI_OP_enable_msix:
+			nr = op->value;
 			op->err = xen_pcibk_enable_msix(pdev, dev, op);
 			break;
 		case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
 	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
 		unsigned int i;
 
-		for (i = 0; i < op->value; i++)
+		for (i = 0; i < nr; i++)
 			pdev->sh_info->op.msix_entries[i].vector =
 				op->msix_entries[i].vector;
 	}
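nr exists because *op was copied from a ring shared with the frontend, and xen_pcibk_enable_msix writes its result back into op->value; without the snapshot, the copy-back loop above would be bounded by a value the handler may have changed after the request was read. A generic sketch of the snapshot-before-use pattern, with made-up names:

#include <stdio.h>

#define MAX_ENTRIES 8	/* illustrative bound on the vector table */

struct fake_op {
	unsigned int value;		/* in: entry count, out: status */
	int vectors[MAX_ENTRIES];	/* out: allocated vector numbers */
};

/* Like the backend handler, this clobbers op->value with its status. */
static int fake_enable_msix(struct fake_op *op)
{
	unsigned int i;

	for (i = 0; i < op->value && i < MAX_ENTRIES; i++)
		op->vectors[i] = 100 + i;
	op->value = 0;	/* status overwrites the request count */
	return 0;
}

int main(void)
{
	struct fake_op op = { .value = 3 };
	unsigned int nr = op.value;	/* snapshot before the handler runs */
	unsigned int i;

	if (fake_enable_msix(&op) == 0)
		for (i = 0; i < nr; i++)	/* bounded by the saved count */
			printf("entry %u -> vector %d\n", i, op.vectors[i]);
	return 0;
}

This mirrors why *op = pdev->sh_info->op is followed by barrier() in the earlier hunk: work on a private copy rather than re-reading shared memory.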
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -848,6 +848,24 @@ static int scsiback_map(struct vscsibk_info *info)
 	return scsiback_init_sring(info, ring_ref, evtchn);
 }
 
+/*
+  Check for a translation entry being present
+*/
+static struct v2p_entry *scsiback_chk_translation_entry(
+	struct vscsibk_info *info, struct ids_tuple *v)
+{
+	struct list_head *head = &(info->v2p_entry_lists);
+	struct v2p_entry *entry;
+
+	list_for_each_entry(entry, head, l)
+		if ((entry->v.chn == v->chn) &&
+		    (entry->v.tgt == v->tgt) &&
+		    (entry->v.lun == v->lun))
+			return entry;
+
+	return NULL;
+}
+
 /*
   Add a new translation entry
 */
@@ -855,9 +873,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
 			char *phy, struct ids_tuple *v)
 {
 	int err = 0;
-	struct v2p_entry *entry;
 	struct v2p_entry *new;
-	struct list_head *head = &(info->v2p_entry_lists);
 	unsigned long flags;
 	char *lunp;
 	unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
 	spin_lock_irqsave(&info->v2p_lock, flags);
 
 	/* Check double assignment to identical virtual ID */
-	list_for_each_entry(entry, head, l) {
-		if ((entry->v.chn == v->chn) &&
-		    (entry->v.tgt == v->tgt) &&
-		    (entry->v.lun == v->lun)) {
-			pr_warn("Virtual ID is already used. Assignment was not performed.\n");
-			err = -EEXIST;
-			goto out;
-		}
-
+	if (scsiback_chk_translation_entry(info, v)) {
+		pr_warn("Virtual ID is already used. Assignment was not performed.\n");
+		err = -EEXIST;
+		goto out;
 	}
 
 	/* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
 	new->v = *v;
 	new->tpg = tpg;
 	new->lun = unpacked_lun;
-	list_add_tail(&new->l, head);
+	list_add_tail(&new->l, &info->v2p_entry_lists);
 
 out:
 	spin_unlock_irqrestore(&info->v2p_lock, flags);
 
 out_free:
-	mutex_lock(&tpg->tv_tpg_mutex);
-	tpg->tv_tpg_fe_count--;
-	mutex_unlock(&tpg->tv_tpg_mutex);
-
-	if (err)
+	if (err) {
+		mutex_lock(&tpg->tv_tpg_mutex);
+		tpg->tv_tpg_fe_count--;
+		mutex_unlock(&tpg->tv_tpg_mutex);
 		kfree(new);
+	}
 
 	return err;
 }
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
 }
 
 /*
-  Delete the translation entry specfied
+  Delete the translation entry specified
 */
 static int scsiback_del_translation_entry(struct vscsibk_info *info,
 					  struct ids_tuple *v)
 {
 	struct v2p_entry *entry;
-	struct list_head *head = &(info->v2p_entry_lists);
 	unsigned long flags;
+	int ret = 0;
 
 	spin_lock_irqsave(&info->v2p_lock, flags);
 	/* Find out the translation entry specified */
-	list_for_each_entry(entry, head, l) {
-		if ((entry->v.chn == v->chn) &&
-		    (entry->v.tgt == v->tgt) &&
-		    (entry->v.lun == v->lun)) {
-			goto found;
-		}
-	}
-
-	spin_unlock_irqrestore(&info->v2p_lock, flags);
-	return 1;
-
-found:
-	/* Delete the translation entry specfied */
-	__scsiback_del_translation_entry(entry);
+	entry = scsiback_chk_translation_entry(info, v);
+	if (entry)
+		__scsiback_del_translation_entry(entry);
+	else
+		ret = -ENOENT;
 
 	spin_unlock_irqrestore(&info->v2p_lock, flags);
-	return 0;
+	return ret;
 }
 
 static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
 				char *phy, struct ids_tuple *vir, int try)
 {
+	struct v2p_entry *entry;
+	unsigned long flags;
+
+	if (try) {
+		spin_lock_irqsave(&info->v2p_lock, flags);
+		entry = scsiback_chk_translation_entry(info, vir);
+		spin_unlock_irqrestore(&info->v2p_lock, flags);
+		if (entry)
+			return;
+	}
 	if (!scsiback_add_translation_entry(info, phy, vir)) {
 		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
 				  "%d", XenbusStateInitialised)) {
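Net effect of the scsiback hunks: three open-coded list walks collapse into scsiback_chk_translation_entry, which must be called under v2p_lock, and the add, delete, and new "try" paths get consistent -EEXIST/-ENOENT semantics. A compact pthread sketch of that shape (hypothetical names, not the driver's API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Miniature of the v2p list: one int id plays the ids_tuple role. */
struct entry {
	int id;
	struct entry *next;
};

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Lone lookup helper; caller must hold the lock, as with v2p_lock. */
static struct entry *chk_entry(int id)
{
	struct entry *e;

	for (e = head; e; e = e->next)
		if (e->id == id)
			return e;
	return NULL;
}

static int add_entry(int id)
{
	struct entry *e;
	int err = 0;

	pthread_mutex_lock(&lock);
	if (chk_entry(id)) {
		err = -EEXIST;		/* the add hunk's duplicate check */
	} else if ((e = malloc(sizeof(*e))) != NULL) {
		e->id = id;
		e->next = head;
		head = e;
	} else {
		err = -ENOMEM;
	}
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	printf("%d\n", add_entry(1));	/* 0 */
	printf("%d\n", add_entry(1));	/* -EEXIST */
	return 0;
}

The "try" flag in scsiback_do_add_lun uses the same helper to probe silently first, which is what stops the duplicate-LUN warning from firing on retries.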
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
 
 	if (len == 0)
 		return 0;
+	if (len > XENSTORE_PAYLOAD_MAX)
+		return -EINVAL;
 
 	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
 	if (rb == NULL)
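The new bound does two jobs: it rejects oversized replies, and it guarantees sizeof(*rb) + len cannot wrap around size_t, which would otherwise let kmalloc succeed with a buffer far smaller than len. A userspace sketch of the wrap the check rules out (XENSTORE_PAYLOAD_MAX is 4096 in the Xen ABI headers):

#include <stdint.h>
#include <stdio.h>

struct reply_buf {	/* illustrative stand-in for the reply record */
	size_t len;
	char data[];
};

int main(void)
{
	size_t len = SIZE_MAX - 4;	/* hostile, unchecked length */
	size_t alloc = sizeof(struct reply_buf) + len;	/* wraps around */

	printf("requested %zu bytes, allocator would be asked for %zu\n",
	       len, alloc);
	/* Capping len at XENSTORE_PAYLOAD_MAX keeps the sum far below
	 * SIZE_MAX, so the addition can never overflow. */
	return 0;
}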