locking/spinlocks/x86, paravirt: Remove paravirt_ticketlocks_enabled
This is a follow-up of commit:

  cfd8983f03 ("x86, locking/spinlocks: Remove ticket (spin)lock implementation")

The static_key structure 'paravirt_ticketlocks_enabled' is now removed as it is
no longer used.
As a result, the init functions kvm_spinlock_init_jump() and
xen_init_spinlocks_jump() are also removed.
A simple build and boot test was done to verify it.
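For reference, this is the jump-label pattern the key followed (a minimal
sketch, not code from this patch; example_key, example_fast_path and
example_enable are made-up names): the key starts disabled, fast-path code
tests it with static_key_false(), and a pv guest enables it once at boot
with static_key_slow_inc(). With the ticket-lock slow path gone there is no
static_key_false() user left, so both the key and the initcalls that
enabled it can be dropped:

  #include <linux/jump_label.h>
  #include <linux/init.h>
  #include <linux/printk.h>

  /* Key starts disabled; the branch below is patched out by default. */
  static struct static_key example_key = STATIC_KEY_INIT_FALSE;

  static void example_fast_path(void)
  {
  	/* Unlikely branch, only taken once the key has been enabled. */
  	if (static_key_false(&example_key))
  		pr_info("paravirt slow path enabled\n");
  }

  static __init int example_enable(void)
  {
  	/* A pv guest would flip the key once, before SMP bring-up. */
  	static_key_slow_inc(&example_key);
  	return 0;
  }
  early_initcall(example_enable);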
Signed-off-by: Waiman Long <longman@redhat.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1484252878-1962-1-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit aef591cd3d
parent 6e03f66c00
@@ -23,9 +23,6 @@
 /* How long a lock should spin before we consider blocking */
 #define SPIN_THRESHOLD	(1 << 15)
 
-extern struct static_key paravirt_ticketlocks_enabled;
-static __always_inline bool static_key_false(struct static_key *key);
-
 #include <asm/qspinlock.h>
 
 /*
@@ -620,18 +620,4 @@ void __init kvm_spinlock_init(void)
 	}
 }
 
-static __init int kvm_spinlock_init_jump(void)
-{
-	if (!kvm_para_available())
-		return 0;
-	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
-		return 0;
-
-	static_key_slow_inc(&paravirt_ticketlocks_enabled);
-	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
-
-	return 0;
-}
-early_initcall(kvm_spinlock_init_jump);
-
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
@@ -42,6 +42,3 @@ struct pv_lock_ops pv_lock_ops = {
 #endif	/* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
-
-struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
-EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
@@ -141,25 +141,6 @@ void __init xen_init_spinlocks(void)
 	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
 
-/*
- * While the jump_label init code needs to happend _after_ the jump labels are
- * enabled and before SMP is started. Hence we use pre-SMP initcall level
- * init. We cannot do it in xen_init_spinlocks as that is done before
- * jump labels are activated.
- */
-static __init int xen_init_spinlocks_jump(void)
-{
-	if (!xen_pvspin)
-		return 0;
-
-	if (!xen_domain())
-		return 0;
-
-	static_key_slow_inc(&paravirt_ticketlocks_enabled);
-	return 0;
-}
-early_initcall(xen_init_spinlocks_jump);
-
 static __init int xen_parse_nopvspin(char *arg)
 {
 	xen_pvspin = false;