xen: branch for v6.2-rc1
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCY5bIVwAKCRCAXGG7T9hj
vmOTAQCb5CR7W3ywFjkNOuR2qfHSMNUSTWh04+rUdvWFKoQJywD9EVgYT+jvQbXS
2zejw/z2ZODLv6rhC+ML0CbXCzcy7gs=
=L1rx
-----END PGP SIGNATURE-----

Merge tag 'for-linus-6.2-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - fix memory leaks in error paths

 - add support for virtio PCI-devices in Xen guests on ARM

 - two minor fixes

* tag 'for-linus-6.2-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource()
  x86/xen: Fix memory leak in xen_init_lock_cpu()
  x86/xen: Fix memory leak in xen_smp_intr_init{_pv}()
  xen: fix xen.h build for CONFIG_XEN_PVH=y
  xen/virtio: Handle PCI devices which Host controller is described in DT
  xen/virtio: Optimize the setup of "xen-grant-dma" devices
commit e6b160bc4d
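All three memory-leak fixes in this pull share one pattern. The irq name allocated with kasprintf() used to be recorded in the per-cpu bookkeeping only after bind_ipi_to_irqhandler()/bind_virq_to_irqhandler() had succeeded, so an allocation followed by a failed bind left the string unreachable on the error path. The fix stores the name immediately after allocation and frees it unconditionally during teardown. A minimal sketch of the resulting shape (struct irq_info, example_irq, bind_example() and unbind_example() are hypothetical stand-ins for the real per-cpu variables and event-channel helpers):

    #include <linux/kernel.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct irq_info { int irq; char *name; };
    static DEFINE_PER_CPU(struct irq_info, example_irq) = { .irq = -1 };

    int bind_example(unsigned int cpu);     /* hypothetical bind helper */
    void unbind_example(int irq);           /* hypothetical unbind helper */

    int example_intr_init(unsigned int cpu)
    {
            int rc;

            /* Record the allocation before trying to bind, so teardown
             * can always find it; kasprintf() may return NULL, which
             * the free path tolerates. */
            per_cpu(example_irq, cpu).name = kasprintf(GFP_KERNEL, "example%d", cpu);
            rc = bind_example(cpu);
            if (rc < 0)
                    return rc;      /* name reclaimed by example_intr_free() */
            per_cpu(example_irq, cpu).irq = rc;
            return 0;
    }

    void example_intr_free(unsigned int cpu)
    {
            /* Free unconditionally: the name can exist even when irq < 0,
             * and kfree(NULL) is a no-op if kasprintf() itself failed. */
            kfree(per_cpu(example_irq, cpu).name);
            per_cpu(example_irq, cpu).name = NULL;
            if (per_cpu(example_irq, cpu).irq >= 0) {
                    unbind_example(per_cpu(example_irq, cpu).irq);
                    per_cpu(example_irq, cpu).irq = -1;
            }
    }

The diffs below apply exactly this ordering to xen_resched_irq, xen_callfunc_irq, xen_debug_irq, xen_callfuncsingle_irq, xen_irq_work, xen_pmu_irq and the spinlock irq_name.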
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
 		return 0;
 
 	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
-		virtio_set_mem_acc_cb(xen_virtio_mem_acc);
+		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
 
 	if (!acpi_disabled)
 		xen_acpi_guest_init();
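The hunk above switches the registered callback from xen_virtio_mem_acc() to xen_virtio_restricted_mem_acc(), whose reworked definition appears further down: the check for a usable grant-DMA backend and the installation of the grant DMA ops now happen together, once, when the virtio core consults the callback. Roughly, the core-side check looks like this (a sketch of the virtio_anchor contract, not the exact core code; example_features_check() is hypothetical):

    #include <linux/virtio.h>
    #include <linux/virtio_config.h>
    #include <linux/virtio_anchor.h>

    /* The virtio core asks the registered callback whether this device
     * must use restricted memory access (e.g. Xen grant mappings).  If
     * so, the device has to offer VIRTIO_F_ACCESS_PLATFORM, because the
     * guest will only hand translated (granted) addresses to the
     * backend. */
    static int example_features_check(struct virtio_device *vdev)
    {
            if (virtio_check_mem_acc_cb(vdev) &&
                !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM))
                    return -ENODEV;
            return 0;
    }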
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 
 void xen_smp_intr_free(unsigned int cpu)
 {
+	kfree(per_cpu(xen_resched_irq, cpu).name);
+	per_cpu(xen_resched_irq, cpu).name = NULL;
 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
 		per_cpu(xen_resched_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_resched_irq, cpu).name);
-		per_cpu(xen_resched_irq, cpu).name = NULL;
 	}
+	kfree(per_cpu(xen_callfunc_irq, cpu).name);
+	per_cpu(xen_callfunc_irq, cpu).name = NULL;
 	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
 		per_cpu(xen_callfunc_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_callfunc_irq, cpu).name);
-		per_cpu(xen_callfunc_irq, cpu).name = NULL;
 	}
+	kfree(per_cpu(xen_debug_irq, cpu).name);
+	per_cpu(xen_debug_irq, cpu).name = NULL;
 	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
 		per_cpu(xen_debug_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_debug_irq, cpu).name);
-		per_cpu(xen_debug_irq, cpu).name = NULL;
 	}
+	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
 	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
 				       NULL);
 		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
-		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
 	}
 }
 
@@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu)
 	char *resched_name, *callfunc_name, *debug_name;
 
 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+	per_cpu(xen_resched_irq, cpu).name = resched_name;
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
 				    cpu,
 				    xen_reschedule_interrupt,
@@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu)
 	if (rc < 0)
 		goto fail;
 	per_cpu(xen_resched_irq, cpu).irq = rc;
-	per_cpu(xen_resched_irq, cpu).name = resched_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
 				    cpu,
 				    xen_call_function_interrupt,
@@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu)
 	if (rc < 0)
 		goto fail;
 	per_cpu(xen_callfunc_irq, cpu).irq = rc;
-	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
 
 	if (!xen_fifo_events) {
 		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+		per_cpu(xen_debug_irq, cpu).name = debug_name;
 		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
 					     xen_debug_interrupt,
 					     IRQF_PERCPU | IRQF_NOBALANCING,
@@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu)
 		if (rc < 0)
 			goto fail;
 		per_cpu(xen_debug_irq, cpu).irq = rc;
-		per_cpu(xen_debug_irq, cpu).name = debug_name;
 	}
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
 				    cpu,
 				    xen_call_function_single_interrupt,
@@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu)
 	if (rc < 0)
 		goto fail;
 	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
-	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
 
 	return 0;
 
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -97,18 +97,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
 
 void xen_smp_intr_free_pv(unsigned int cpu)
 {
+	kfree(per_cpu(xen_irq_work, cpu).name);
+	per_cpu(xen_irq_work, cpu).name = NULL;
 	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
 		per_cpu(xen_irq_work, cpu).irq = -1;
-		kfree(per_cpu(xen_irq_work, cpu).name);
-		per_cpu(xen_irq_work, cpu).name = NULL;
 	}
 
+	kfree(per_cpu(xen_pmu_irq, cpu).name);
+	per_cpu(xen_pmu_irq, cpu).name = NULL;
 	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
 		per_cpu(xen_pmu_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_pmu_irq, cpu).name);
-		per_cpu(xen_pmu_irq, cpu).name = NULL;
 	}
 }
 
@@ -118,6 +118,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
 	char *callfunc_name, *pmu_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
 				    cpu,
 				    xen_irq_work_interrupt,
@@ -127,10 +128,10 @@ int xen_smp_intr_init_pv(unsigned int cpu)
 	if (rc < 0)
 		goto fail;
 	per_cpu(xen_irq_work, cpu).irq = rc;
-	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 
 	if (is_xen_pmu) {
 		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
+		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
 		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
 					     xen_pmu_irq_handler,
 					     IRQF_PERCPU|IRQF_NOBALANCING,
@@ -138,7 +139,6 @@ int xen_smp_intr_init_pv(unsigned int cpu)
 		if (rc < 0)
 			goto fail;
 		per_cpu(xen_pmu_irq, cpu).irq = rc;
-		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
 	}
 
 	return 0;
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu)
 	     cpu, per_cpu(lock_kicker_irq, cpu));
 
 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+	per_cpu(irq_name, cpu) = name;
 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
 				     cpu,
 				     dummy_handler,
@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu)
 	if (irq >= 0) {
 		disable_irq(irq); /* make sure it's never delivered */
 		per_cpu(lock_kicker_irq, cpu) = irq;
-		per_cpu(irq_name, cpu) = name;
 	}
 
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu)
 	if (!xen_pvspin)
 		return;
 
+	kfree(per_cpu(irq_name, cpu));
+	per_cpu(irq_name, cpu) = NULL;
 	/*
 	 * When booting the kernel with 'mitigations=auto,nosmt', the secondary
 	 * CPUs are not activated, and lock_kicker_irq is not initialized.
@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu)
 
 	unbind_from_irqhandler(irq, NULL);
 	per_cpu(lock_kicker_irq, cpu) = -1;
-	kfree(per_cpu(irq_name, cpu));
-	per_cpu(irq_name, cpu) = NULL;
 }
 
 PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/dma-map-ops.h>
 #include <linux/of.h>
+#include <linux/pci.h>
 #include <linux/pfn.h>
 #include <linux/xarray.h>
 #include <linux/virtio_anchor.h>
@@ -292,50 +293,48 @@ static const struct dma_map_ops xen_grant_dma_ops = {
 	.dma_supported = xen_grant_dma_supported,
 };
 
-static bool xen_is_dt_grant_dma_device(struct device *dev)
+static struct device_node *xen_dt_get_node(struct device *dev)
 {
-	struct device_node *iommu_np;
-	bool has_iommu;
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+		struct pci_bus *bus = pdev->bus;
 
-	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
-	has_iommu = iommu_np &&
-		    of_device_is_compatible(iommu_np, "xen,grant-dma");
-	of_node_put(iommu_np);
+		/* Walk up to the root bus to look for PCI Host controller */
+		while (!pci_is_root_bus(bus))
+			bus = bus->parent;
 
-	return has_iommu;
-}
+		return of_node_get(bus->bridge->parent->of_node);
+	}
 
-bool xen_is_grant_dma_device(struct device *dev)
-{
-	/* XXX Handle only DT devices for now */
-	if (dev->of_node)
-		return xen_is_dt_grant_dma_device(dev);
-
-	return false;
-}
-
-bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
-	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
-		return true;
-
-	return xen_is_grant_dma_device(dev->dev.parent);
+	return of_node_get(dev->of_node);
 }
 
 static int xen_dt_grant_init_backend_domid(struct device *dev,
-					   struct xen_grant_dma_data *data)
+					   struct device_node *np,
+					   domid_t *backend_domid)
 {
-	struct of_phandle_args iommu_spec;
+	struct of_phandle_args iommu_spec = { .args_count = 1 };
 
-	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
-				       0, &iommu_spec)) {
-		dev_err(dev, "Cannot parse iommus property\n");
-		return -ESRCH;
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+		u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+		if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
+				iommu_spec.args)) {
+			dev_dbg(dev, "Cannot translate ID\n");
+			return -ESRCH;
+		}
+	} else {
+		if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
+				0, &iommu_spec)) {
+			dev_dbg(dev, "Cannot parse iommus property\n");
+			return -ESRCH;
+		}
 	}
 
 	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
 	    iommu_spec.args_count != 1) {
-		dev_err(dev, "Incompatible IOMMU node\n");
+		dev_dbg(dev, "Incompatible IOMMU node\n");
 		of_node_put(iommu_spec.np);
 		return -ESRCH;
 	}
@@ -346,12 +345,31 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
 	 * The endpoint ID here means the ID of the domain where the
 	 * corresponding backend is running
 	 */
-	data->backend_domid = iommu_spec.args[0];
+	*backend_domid = iommu_spec.args[0];
 
 	return 0;
 }
 
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_grant_init_backend_domid(struct device *dev,
+					domid_t *backend_domid)
+{
+	struct device_node *np;
+	int ret = -ENODEV;
+
+	np = xen_dt_get_node(dev);
+	if (np) {
+		ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
+		of_node_put(np);
+	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
+		dev_info(dev, "Using dom0 as backend\n");
+		*backend_domid = 0;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
 {
 	struct xen_grant_dma_data *data;
 
@@ -365,16 +383,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
 	if (!data)
 		goto err;
 
-	if (dev->of_node) {
-		if (xen_dt_grant_init_backend_domid(dev, data))
-			goto err;
-	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
-		dev_info(dev, "Using dom0 as backend\n");
-		data->backend_domid = 0;
-	} else {
-		/* XXX ACPI device unsupported for now */
-		goto err;
-	}
+	data->backend_domid = backend_domid;
 
 	if (store_xen_grant_dma_data(dev, data)) {
 		dev_err(dev, "Cannot store Xen grant DMA data\n");
@@ -392,12 +401,14 @@ err:
 
 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
 {
-	bool ret = xen_virtio_mem_acc(dev);
+	domid_t backend_domid;
 
-	if (ret)
-		xen_grant_setup_dma_ops(dev->dev.parent);
+	if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
+		xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
+		return true;
+	}
 
-	return ret;
+	return false;
 }
 
 MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
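For a virtio-pci device, the key passed to of_map_id() above is the requester ID built from the device's position on the bus. A small worked example of that computation, with made-up bus/slot/function numbers (PCI_DEVFN() and PCI_DEVID() are the standard helpers from <linux/pci.h>):

    #include <linux/pci.h>

    /* A hypothetical device at bus 3, slot 2, function 1: */
    u32 example_rid(void)
    {
            unsigned int devfn = PCI_DEVFN(2, 1);   /* (2 << 3) | 1  = 0x11   */
            u32 rid = PCI_DEVID(3, devfn);          /* (3 << 8) | 0x11 = 0x0311 */

            /*
             * of_map_id() translates this RID through the PCI host
             * bridge's "iommu-map"/"iommu-map-mask" properties; if the
             * node it resolves to is compatible with "xen,grant-dma",
             * the single specifier cell is the backend domid.
             */
            return rid;
    }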
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
 			goto out;
 	}
 
-	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
+	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
 	if (!pfns) {
 		rc = -ENOMEM;
 		goto out;
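This is the "possible warning" fix from the pull summary: kdata.num is user-controlled, so an absurdly large ioctl argument could push kcalloc() past the allocator's limits and trigger the page-allocation failure warning. With __GFP_NOWARN the oversized request simply fails and the ioctl returns -ENOMEM. The same defensive shape in isolation (example_ioctl() and its layout are hypothetical):

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    long example_ioctl(void __user *uarg)
    {
            u64 num;
            u64 *buf;

            if (copy_from_user(&num, uarg, sizeof(num)))
                    return -EFAULT;

            /* User-controlled element count: suppress the allocation
             * failure splat and report a plain error code instead. */
            buf = kcalloc(num, sizeof(*buf), GFP_KERNEL | __GFP_NOWARN);
            if (!buf)
                    return -ENOMEM;

            /* ... use buf ... */
            kfree(buf);
            return 0;
    }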
--- a/include/xen/arm/xen-ops.h
+++ b/include/xen/arm/xen-ops.h
@@ -8,9 +8,7 @@
 static inline void xen_setup_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_XEN
-	if (xen_is_grant_dma_device(dev))
-		xen_grant_setup_dma_ops(dev);
-	else if (xen_swiotlb_detect())
+	if (xen_swiotlb_detect())
 		dev->dma_ops = &xen_swiotlb_dma_ops;
 #endif
 }
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
 #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
 
 #ifdef CONFIG_XEN_GRANT_DMA_OPS
-void xen_grant_setup_dma_ops(struct device *dev);
-bool xen_is_grant_dma_device(struct device *dev);
-bool xen_virtio_mem_acc(struct virtio_device *dev);
 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
 #else
-static inline void xen_grant_setup_dma_ops(struct device *dev)
-{
-}
-static inline bool xen_is_grant_dma_device(struct device *dev)
-{
-	return false;
-}
-
 struct virtio_device;
 
-static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
-	return false;
-}
-
 static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
 {
 	return false;
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -2,6 +2,8 @@
 #ifndef _XEN_XEN_H
 #define _XEN_XEN_H
 
+#include <linux/types.h>
+
 enum xen_domain_type {
 	XEN_NATIVE,	/* running on bare hardware */
 	XEN_PV_DOMAIN,	/* running in a PV domain */
@@ -25,8 +27,6 @@ extern bool xen_pvh;
 #define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
 #define xen_pvh_domain()	(xen_pvh)
 
-#include <linux/types.h>
-
 extern uint32_t xen_start_flags;
 
 #include <xen/interface/hvm/start_info.h>
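These xen.h hunks are the CONFIG_XEN_PVH=y build fix: as the second hunk's context shows, "extern bool xen_pvh;" sits near the top of the header, yet <linux/types.h> used to be included only after it, so bool (and uint32_t for xen_start_flags) could be referenced before being defined. Moving the include to the top of the header resolves that. The failure mode in miniature (a hypothetical header, not the literal layout):

    /* broken_example.h */
    #ifdef CONFIG_EXAMPLE_PVH
    extern bool example_pvh;        /* error: unknown type name 'bool' ...    */
    #endif
    #include <linux/types.h>        /* ... because the typedef arrives later */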