xen: fix xen_qlock_wait()
Commit a856531951 ("xen: make xen_qlock_wait() nestable") introduced a
regression for Xen guests running fully virtualized (HVM or PVH mode).
The Xen hypervisor wouldn't return from the poll hypercall with
interrupts disabled in case of an interrupt (for PV guests it does).

So instead of disabling interrupts in xen_qlock_wait(), use a nesting
counter to avoid calling xen_clear_irq_pending() in case
xen_qlock_wait() is nested.

Fixes: a856531951 ("xen: make xen_qlock_wait() nestable")
Cc: stable@vger.kernel.org
Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: Juergen Gross <jgross@suse.com>
commit d3132b3860
parent 1457d8cf76
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -9,6 +9,7 @@
 #include <linux/log2.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
  */
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
-	unsigned long flags;
 	int irq = __this_cpu_read(lock_kicker_irq);
+	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
 	/* If kicker interrupts not initialized yet, just spin */
 	if (irq == -1 || in_nmi())
 		return;
 
-	/* Guard against reentry. */
-	local_irq_save(flags);
+	/* Detect reentry. */
+	atomic_inc(nest_cnt);
 
-	/* If irq pending already clear it. */
-	if (xen_test_irq_pending(irq)) {
+	/* If irq pending already and no nested call clear it. */
+	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
 		xen_clear_irq_pending(irq);
 	} else if (READ_ONCE(*byte) == val) {
 		/* Block until irq becomes pending (or a spurious wakeup) */
 		xen_poll_irq(irq);
 	}
 
-	local_irq_restore(flags);
+	atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
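For readers who want to experiment with the pattern outside the kernel
tree, here is a minimal userspace sketch of the nesting-counter idea.
C11 atomics stand in for the kernel's per-CPU atomic_t, and the names
(qlock_wait_sketch, wait_nest, event_pending) are illustrative, not
taken from the commit. Only the outermost caller may consume a pending
event; a nested caller sees a count greater than 1 and leaves the
pending state alone, which is what keeps the outer wait from hanging.

/* Build with: cc -std=c11 -o nest_sketch nest_sketch.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int wait_nest;        /* stands in for xen_qlock_wait_nest */
static atomic_bool event_pending;   /* stands in for the kicker IRQ pending bit */

static void qlock_wait_sketch(bool simulate_nested_entry)
{
	/* Mark entry; a nested call will observe a count greater than 1. */
	atomic_fetch_add(&wait_nest, 1);

	if (atomic_load(&wait_nest) == 1 && atomic_load(&event_pending)) {
		/* Outermost call: safe to clear the pending event. */
		atomic_store(&event_pending, false);
		printf("outermost call: cleared pending event\n");
	} else {
		/* Nested call, or nothing pending: would block/poll here. */
		printf("nest level %d: leaving pending state alone\n",
		       atomic_load(&wait_nest));
	}

	/* Emulate an interrupt handler re-entering the wait path. */
	if (simulate_nested_entry)
		qlock_wait_sketch(false);

	/* Leave; the reentry marker is released. */
	atomic_fetch_sub(&wait_nest, 1);
}

int main(void)
{
	atomic_store(&event_pending, true);
	qlock_wait_sketch(true);
	return 0;
}

The reason a counter is used instead of local_irq_save()/
local_irq_restore() is exactly what the commit message describes: on
HVM/PVH the hypervisor can return from the poll hypercall with
interrupts enabled, so disabling interrupts is not a reliable reentry
guard there, while the counter works in every guest mode.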