xen: branch for v5.11-rc4
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCYAGllQAKCRCAXGG7T9hj
vqEtAP9uws/W/JPcnsohK76hMcFAVxZCVdX7C3HvfW5tp6hqMgEAg9ic8sYiuHhn
6FouRu/ZXHJEg3PpS5W66yKNIYPvGgw=
=rR+L
-----END PGP SIGNATURE-----

Merge tag 'for-linus-5.11-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:

 - A series to fix a regression when running as a fully virtualized
   guest on an old Xen hypervisor that does not support PV interrupt
   callbacks for HVM guests.

 - A patch to add support for querying Xen resource sizes (setting them
   was already possible) from user mode.

* tag 'for-linus-5.11-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  x86/xen: Fix xen_hvm_smp_init() when vector callback not available
  x86/xen: Don't register Xen IPIs when they aren't going to be used
  x86/xen: Add xen_no_vector_callback option to test PCI INTX delivery
  xen: Set platform PCI device INTX affinity to CPU0
  xen: Fix event channel callback via INTX/GSI
  xen/privcmd: allow fetching resource sizes
commit dcda487c9c
Documentation/admin-guide/kernel-parameters.txt

@@ -5972,6 +5972,10 @@
 			This option is obsoleted by the "nopv" option, which
 			has equivalent effect for XEN platform.
 
+	xen_no_vector_callback
+			[KNL,X86,XEN] Disable the vector callback for Xen
+			event channel interrupts.
+
 	xen_scrub_pages=	[XEN]
 			Boolean option to control scrubbing pages before giving them back
 			to Xen, for use by other domains. Can be also changed at runtime
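Usage note: like other early parameters, the new option is simply appended to the kernel command line and takes no value. A hypothetical boot entry (the parameter name comes from the patch above; the surrounding GRUB configuration is illustrative only):

    # /etc/default/grub -- hypothetical example; forces the guest to take
    # Xen event channel interrupts via PCI INTX/GSI instead of the
    # per-vCPU vector callback, e.g. to test that delivery path.
    GRUB_CMDLINE_LINUX="xen_no_vector_callback"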
arch/arm/xen/enlighten.c

@@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
 	}
 	gnttab_init();
 	if (!xen_initial_domain())
-		xenbus_probe(NULL);
+		xenbus_probe();
 
 	/*
 	 * Making sure board specific code will not set up ops for
arch/x86/xen/enlighten_hvm.c

@@ -164,10 +164,10 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
 	else
 		per_cpu(xen_vcpu_id, cpu) = cpu;
 	rc = xen_vcpu_setup(cpu);
-	if (rc)
+	if (rc || !xen_have_vector_callback)
 		return rc;
 
-	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
+	if (xen_feature(XENFEAT_hvm_safe_pvclock))
 		xen_setup_timer(cpu);
 
 	rc = xen_smp_intr_init(cpu);
@@ -188,6 +188,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
 	return 0;
 }
 
+static bool no_vector_callback __initdata;
+
 static void __init xen_hvm_guest_init(void)
 {
 	if (xen_pv_domain())
@@ -207,7 +209,7 @@ static void __init xen_hvm_guest_init(void)
 
 	xen_panic_handler_init();
 
-	if (xen_feature(XENFEAT_hvm_callback_vector))
+	if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
 		xen_have_vector_callback = 1;
 
 	xen_hvm_smp_init();
@@ -233,6 +235,13 @@ static __init int xen_parse_nopv(char *arg)
 }
 early_param("xen_nopv", xen_parse_nopv);
 
+static __init int xen_parse_no_vector_callback(char *arg)
+{
+	no_vector_callback = true;
+	return 0;
+}
+early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
+
 bool __init xen_hvm_need_lapic(void)
 {
 	if (xen_pv_domain())
arch/x86/xen/smp_hvm.c

@@ -33,9 +33,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	int cpu;
 
 	native_smp_prepare_cpus(max_cpus);
-	WARN_ON(xen_smp_intr_init(0));
 
-	xen_init_lock_cpu(0);
+	if (xen_have_vector_callback) {
+		WARN_ON(xen_smp_intr_init(0));
+		xen_init_lock_cpu(0);
+	}
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == 0)
@@ -50,9 +52,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static void xen_hvm_cpu_die(unsigned int cpu)
 {
 	if (common_cpu_die(cpu) == 0) {
-		xen_smp_intr_free(cpu);
-		xen_uninit_lock_cpu(cpu);
-		xen_teardown_timer(cpu);
+		if (xen_have_vector_callback) {
+			xen_smp_intr_free(cpu);
+			xen_uninit_lock_cpu(cpu);
+			xen_teardown_timer(cpu);
+		}
 	}
 }
 #else
@@ -64,14 +68,17 @@ static void xen_hvm_cpu_die(unsigned int cpu)
 
 void __init xen_hvm_smp_init(void)
 {
-	if (!xen_have_vector_callback)
-		return;
-
+	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
-	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+	smp_ops.smp_cpus_done = xen_smp_cpus_done;
 	smp_ops.cpu_die = xen_hvm_cpu_die;
+
+	if (!xen_have_vector_callback) {
+		nopvspin = true;
+		return;
+	}
+
+	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
-	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
-	smp_ops.smp_cpus_done = xen_smp_cpus_done;
 }
drivers/xen/events/events_base.c

@@ -2060,16 +2060,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
 	.irq_ack		= ack_dynirq,
 };
 
-int xen_set_callback_via(uint64_t via)
-{
-	struct xen_hvm_param a;
-	a.domid = DOMID_SELF;
-	a.index = HVM_PARAM_CALLBACK_IRQ;
-	a.value = via;
-	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
 #ifdef CONFIG_XEN_PVHVM
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
drivers/xen/platform-pci.c

@@ -132,6 +132,13 @@ static int platform_pci_probe(struct pci_dev *pdev,
 		dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
 		goto out;
 	}
+	/*
+	 * It doesn't strictly *have* to run on CPU0 but it sure
+	 * as hell better process the event channel ports delivered
+	 * to CPU0.
+	 */
+	irq_set_affinity(pdev->irq, cpumask_of(0));
+
 	callback_via = get_callback_via(pdev);
 	ret = xen_set_callback_via(callback_via);
 	if (ret) {
@@ -149,7 +156,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
 	ret = gnttab_init();
 	if (ret)
 		goto grant_out;
-	xenbus_probe(NULL);
 	return 0;
 grant_out:
 	gnttab_free_auto_xlat_frames();
drivers/xen/privcmd.c

@@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
 	return 0;
 }
 
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+				struct privcmd_mmap_resource __user *udata)
 {
 	struct privcmd_data *data = file->private_data;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct privcmd_mmap_resource kdata;
 	xen_pfn_t *pfns = NULL;
-	struct xen_mem_acquire_resource xdata;
+	struct xen_mem_acquire_resource xdata = { };
 	int rc;
 
 	if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
 		return -EPERM;
 
+	/* Both fields must be set or unset */
+	if (!!kdata.addr != !!kdata.num)
+		return -EINVAL;
+
+	xdata.domid = kdata.dom;
+	xdata.type = kdata.type;
+	xdata.id = kdata.id;
+
+	if (!kdata.addr && !kdata.num) {
+		/* Query the size of the resource. */
+		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+		if (rc)
+			return rc;
+		return __put_user(xdata.nr_frames, &udata->num);
+	}
+
 	mmap_write_lock(mm);
 
 	vma = find_vma(mm, kdata.addr);
@@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 	} else
 		vma->vm_private_data = PRIV_VMA_LOCKED;
 
-	memset(&xdata, 0, sizeof(xdata));
-	xdata.domid = kdata.dom;
-	xdata.type = kdata.type;
-	xdata.id = kdata.id;
 	xdata.frame = kdata.idx;
 	xdata.nr_frames = kdata.num;
 	set_xen_guest_handle(xdata.frame_list, pfns);
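For reference, the new query path can be driven from user space through /dev/xen/privcmd: leaving both addr and num at zero in struct privcmd_mmap_resource now returns the resource size instead of mapping anything. A minimal sketch, assuming the Xen UAPI headers xen/privcmd.h and xen/memory.h are available; the grant-table resource type and the domid are placeholders, and error handling is abbreviated:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <xen/privcmd.h>  /* struct privcmd_mmap_resource, IOCTL_PRIVCMD_MMAP_RESOURCE */
    #include <xen/memory.h>   /* XENMEM_resource_grant_table */

    /* Ask how many frames a domain's resource spans, without mapping it:
     * addr == 0 and num == 0 select the new size-query path, and the
     * kernel writes the frame count back into ->num. Returns -1 on error. */
    static long query_resource_size(int fd, unsigned short domid)
    {
            struct privcmd_mmap_resource req = {
                    .dom  = domid,
                    .type = XENMEM_resource_grant_table, /* placeholder type */
                    .id   = 0,                           /* placeholder id */
                    .idx  = 0,
                    .num  = 0,  /* num == 0 and addr == 0 => query size */
                    .addr = 0,
            };

            if (ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &req))
                    return -1;
            return (long)req.num;
    }

    int main(void)
    {
            int fd = open("/dev/xen/privcmd", O_RDWR);

            if (fd < 0)
                    return 1;
            printf("grant table: %ld frames\n",
                   query_resource_size(fd, 1 /* placeholder domid */));
            return 0;
    }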
drivers/xen/xenbus/xenbus.h

@@ -115,6 +115,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
 		      const char *type,
 		      const char *nodename);
 int xenbus_probe_devices(struct xen_bus_type *bus);
+void xenbus_probe(void);
 
 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
drivers/xen/xenbus/xenbus_comms.c

@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
 static int xenbus_irq;
 static struct task_struct *xenbus_task;
 
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
 static irqreturn_t wake_waiting(int irq, void *unused)
 {
-	if (unlikely(xenstored_ready == 0)) {
-		xenstored_ready = 1;
-		schedule_work(&probe_work);
-	}
-
 	wake_up(&xb_waitq);
 	return IRQ_HANDLED;
 }
drivers/xen/xenbus/xenbus_probe.c

@@ -683,29 +683,76 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
-void xenbus_probe(struct work_struct *unused)
+void xenbus_probe(void)
 {
 	xenstored_ready = 1;
 
+	/*
+	 * In the HVM case, xenbus_init() deferred its call to
+	 * xs_init() in case callbacks were not operational yet.
+	 * So do it now.
+	 */
+	if (xen_store_domain_type == XS_HVM)
+		xs_init();
+
 	/* Notify others that xenstore is up */
 	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(xenbus_probe);
 
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised, before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
+{
+#ifdef CONFIG_XEN_PVHVM
+	return xen_store_domain_type == XS_HVM &&
+		!xen_have_vector_callback;
+#else
+	return false;
+#endif
+}
+
 static int __init xenbus_probe_initcall(void)
 {
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (xen_initial_domain() || xen_hvm_domain())
-		return 0;
+	/*
+	 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+	 * need to wait for the platform PCI device to come up.
+	 */
+	if (xen_store_domain_type == XS_PV ||
+	    (xen_store_domain_type == XS_HVM &&
+	     !xs_hvm_defer_init_for_callback()))
+		xenbus_probe();
 
-	xenbus_probe(NULL);
 	return 0;
 }
-
 device_initcall(xenbus_probe_initcall);
 
+int xen_set_callback_via(uint64_t via)
+{
+	struct xen_hvm_param a;
+	int ret;
+
+	a.domid = DOMID_SELF;
+	a.index = HVM_PARAM_CALLBACK_IRQ;
+	a.value = via;
+
+	ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+	if (ret)
+		return ret;
+
+	/*
+	 * If xenbus_probe_initcall() deferred the xenbus_probe()
+	 * due to the callback not functioning yet, we can do it now.
+	 */
+	if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+		xenbus_probe();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
 /* Set up event channel for xenstored which is run as a local process
  * (this is normally used only in dom0)
  */
@@ -818,11 +865,17 @@ static int __init xenbus_init(void)
 		break;
 	}
 
-	/* Initialize the interface to xenstore. */
-	err = xs_init();
-	if (err) {
-		pr_warn("Error initializing xenstore comms: %i\n", err);
-		goto out_error;
+	/*
+	 * HVM domains may not have a functional callback yet. In that
+	 * case let xs_init() be called from xenbus_probe(), which will
+	 * get invoked at an appropriate time.
+	 */
+	if (xen_store_domain_type != XS_HVM) {
+		err = xs_init();
+		if (err) {
+			pr_warn("Error initializing xenstore comms: %i\n", err);
+			goto out_error;
+		}
 	}
 
 	if ((xen_store_domain_type != XS_LOCAL) &&
include/xen/xenbus.h

@@ -192,7 +192,7 @@ void xs_suspend_cancel(void);
 
 struct work_struct;
 
-void xenbus_probe(struct work_struct *);
+void xenbus_probe(void);
 
 #define XENBUS_IS_ERR_READ(str) ({			\
 	if (!IS_ERR(str) && strlen(str) == 0) {		\