Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The tree got pretty big in this development cycle, but the net effect
  is pretty good:

    115 files changed, 673 insertions(+), 1522 deletions(-)

  The main changes were:

   - Rework and generalize the mutex code to remove per arch mutex
     primitives. (Peter Zijlstra)

   - Add vCPU preemption support: add an interface to query the
     preemption status of vCPUs and use it in locking primitives - this
     optimizes paravirt performance. (Pan Xinhui, Juergen Gross,
     Christian Borntraeger)

   - Introduce cpu_relax_yield() and remove cpu_relax_lowlatency() to
     clean up and improve the s390 lock yielding machinery and its core
     kernel impact. (Christian Borntraeger)

   - Micro-optimize mutexes some more. (Waiman Long)

   - Reluctantly add the to-be-deprecated mutex_trylock_recursive()
     interface on a temporary basis, to give the DRM code more time to
     get rid of its locking hacks. Any other users will be NAK-ed on
     sight. (We turned off the deprecation warning for the time being
     to not pollute the build log.) (Peter Zijlstra)

   - Improve the rtmutex code a bit, in light of recent long lived
     bugs/races. (Thomas Gleixner)

   - Misc fixes, cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  x86/paravirt: Fix bool return type for PVOP_CALL()
  x86/paravirt: Fix native_patch()
  locking/ww_mutex: Use relaxed atomics
  locking/rtmutex: Explain locking rules for rt_mutex_proxy_unlock()/init_proxy_locked()
  locking/rtmutex: Get rid of RT_MUTEX_OWNER_MASKALL
  x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()
  locking/mutex: Break out of expensive busy-loop on {mutex,rwsem}_spin_on_owner() when owner vCPU is preempted
  locking/osq: Break out of spin-wait busy waiting loop for a preempted vCPU in osq_lock()
  Documentation/virtual/kvm: Support the vCPU preemption check
  x86/xen: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  kvm: Introduce kvm_write_guest_offset_cached()
  locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests
  locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)
  locking/core, powerpc: Implement vcpu_is_preempted(cpu)
  sched/core: Introduce the vcpu_is_preempted(cpu) interface
  sched/wake_q: Rename WAKE_Q to DEFINE_WAKE_Q
  locking/core: Provide common cpu_relax_yield() definition
  locking/mutex: Don't mark mutex_trylock_recursive() as deprecated, temporarily
  ...
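The central new primitive in this cycle is vcpu_is_preempted(cpu), which lets
spin-wait loops detect that the lock holder's vCPU has been scheduled out by
the hypervisor and stop burning cycles on it. The sketch below illustrates the
usage pattern only: it is plain user-space C, the names demo_lock and
demo_lock_acquire are placeholders of the editor's, and vcpu_is_preempted() is
stubbed out here rather than taken from the per-arch implementations in the
diff that follows.

/*
 * Illustrative user-space sketch (not kernel code): the shape of the
 * optimization this series adds to mutex/rwsem/osq spinning.  A spinner
 * polls the lock, but yields as soon as the owner's vCPU is known to be
 * preempted, since spinning cannot help a descheduled owner make progress.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool vcpu_is_preempted(int cpu)
{
	(void)cpu;
	return false;		/* bare metal: never report preemption */
}

struct demo_lock {
	atomic_int locked;	/* 0 = free, 1 = held */
	atomic_int owner_cpu;	/* CPU of the current holder */
};

static void demo_lock_acquire(struct demo_lock *lock, int my_cpu)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak(&lock->locked, &expected, 1)) {
		expected = 0;
		/*
		 * If the holder's vCPU is not running, busy-waiting is
		 * wasted work: give up the time slice instead.
		 */
		if (vcpu_is_preempted(atomic_load(&lock->owner_cpu)))
			sched_yield();
	}
	atomic_store(&lock->owner_cpu, my_cpu);
}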
@@ -208,7 +208,9 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
	__u8  preempted;
	__u8  u8_pad[3];
	__u32 pad[11];
	}

whose data will be filled in by the hypervisor periodically. Only one

@@ -232,6 +234,11 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
	nanoseconds. Time during which the vcpu is idle, will not be
	reported as steal time.

	preempted: indicate the vCPU who owns this struct is running or
	not. Non-zero values mean the vCPU has been preempted. Zero
	means the vCPU is not preempted. NOTE, it is always zero if the
	the hypervisor doesn't support this field.

MSR_KVM_EOI_EN: 0x4b564d04
	data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
	when disabled. Bit 1 is reserved and must be zero. When PV end of
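Since the new preempted byte is carved out of the existing padding (pad[12]
shrinks to pad[11] plus three bytes of explicit padding), the record keeps its
64-byte layout. The following standalone check of that arithmetic uses a
mirror of the documented fields under a hypothetical name, not the real kernel
uapi header:

#include <assert.h>
#include <stdint.h>

/* Mirror of the documented layout above; not the kernel's uapi header. */
struct steal_time_layout {
	uint64_t steal;
	uint32_t version;
	uint32_t flags;
	uint8_t  preempted;
	uint8_t  u8_pad[3];
	uint32_t pad[11];
};

/* 8 + 4 + 4 + 1 + 3 + 44 == 64, same size as the old pad[12] layout. */
static_assert(sizeof(struct steal_time_layout) == 64,
	      "steal time record must remain 64 bytes");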
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -58,7 +58,6 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define ARCH_HAS_PREFETCH
|
||||
#define ARCH_HAS_PREFETCHW
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
/*
|
||||
* xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
|
||||
* atomic dec based which can "count" any number of lock contenders.
|
||||
* This ideally needs to be fixed in core, but for now switching to dec ver.
|
||||
*/
|
||||
#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
#else
|
||||
#include <asm-generic/mutex-xchg.h>
|
||||
#endif
|
|
@ -60,15 +60,12 @@ struct task_struct;
|
|||
#ifndef CONFIG_EZNPS_MTM_EXT
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#else
|
||||
|
||||
#define cpu_relax() \
|
||||
__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
|
||||
|
||||
#define cpu_relax_lowlatency() barrier()
|
||||
|
||||
#endif
|
||||
|
||||
#define copy_segments(tsk, mm) do { } while (0)
|
||||
|
|
|
@ -1,21 +0,0 @@
|
|||
/*
|
||||
* arch/arm/include/asm/mutex.h
|
||||
*
|
||||
* ARM optimized mutex locking primitives
|
||||
*
|
||||
* Please look into asm-generic/mutex-xchg.h for a formal definition.
|
||||
*/
|
||||
#ifndef _ASM_MUTEX_H
|
||||
#define _ASM_MUTEX_H
|
||||
/*
|
||||
* On pre-ARMv6 hardware this results in a swp-based implementation,
|
||||
* which is the most efficient. For ARMv6+, we have exclusive memory
|
||||
* accessors and use atomic_dec to avoid the extra xchg operations
|
||||
* on the locking slowpaths.
|
||||
*/
|
||||
#if __LINUX_ARM_ARCH__ < 6
|
||||
#include <asm-generic/mutex-xchg.h>
|
||||
#else
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
#endif
|
||||
#endif /* _ASM_MUTEX_H */
|
|
@ -82,8 +82,6 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define cpu_relax() barrier()
|
||||
#endif
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(p) \
|
||||
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
|
||||
|
||||
|
|
|
@ -24,7 +24,6 @@ generic-y += mm-arch-hooks.h
|
|||
generic-y += mman.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += msi.h
|
||||
generic-y += mutex.h
|
||||
generic-y += poll.h
|
||||
generic-y += preempt.h
|
||||
generic-y += resource.h
|
||||
|
|
|
@ -149,8 +149,6 @@ static inline void cpu_relax(void)
|
|||
asm volatile("yield" ::: "memory");
|
||||
}
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Thread switching */
|
||||
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
|
||||
struct task_struct *next);
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -92,7 +92,6 @@ extern struct avr32_cpuinfo boot_cpu_data;
|
|||
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
|
||||
|
||||
struct cpu_context {
|
||||
|
|
|
@ -24,7 +24,6 @@ generic-y += mcs_spinlock.h
|
|||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mman.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += percpu.h
|
||||
generic-y += pgalloc.h
|
||||
|
|
|
@ -92,7 +92,6 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
|
||||
|
||||
#define cpu_relax() smp_mb()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Get the Silicon Revision of the chip */
|
||||
static inline uint32_t __pure bfin_revid(void)
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef _ASM_C6X_MUTEX_H
|
||||
#define _ASM_C6X_MUTEX_H
|
||||
|
||||
#include <asm-generic/mutex-null.h>
|
||||
|
||||
#endif /* _ASM_C6X_MUTEX_H */
|
|
@ -121,7 +121,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
|
||||
|
||||
#define cpu_relax() do { } while (0)
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
extern const struct seq_operations cpuinfo_op;
|
||||
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -63,7 +63,6 @@ static inline void release_thread(struct task_struct *dead_task)
|
|||
#define init_stack (init_thread_union.stack)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
void default_idle(void);
|
||||
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -107,7 +107,6 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* data cache prefetch */
|
||||
#define ARCH_HAS_PREFETCH
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -127,7 +127,6 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
local_irq_disable(); \
|
||||
|
|
|
@ -1,8 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
#include <asm-generic/mutex-xchg.h>
|
|
@ -56,7 +56,6 @@ struct thread_struct {
|
|||
}
|
||||
|
||||
#define cpu_relax() __vmyield()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* Decides where the kernel will search for a free chunk of vm space during
|
||||
|
|
|
@ -1,90 +0,0 @@
|
|||
/*
|
||||
* ia64 implementation of the mutex fastpath.
|
||||
*
|
||||
* Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ASM_MUTEX_H
|
||||
#define _ASM_MUTEX_H
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 1
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
|
||||
* it wasn't 1 originally. This function MUST leave the value lower than
|
||||
* 1 even when the "1" assertion wasn't true.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 0
|
||||
*
|
||||
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
|
||||
* In the failure case, this function is allowed to either set the value to
|
||||
* 1, or to set it to a value lower than 1.
|
||||
*
|
||||
* If the implementation sets it to a value of lower than 1, then the
|
||||
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
|
||||
* to return 0 otherwise.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int ret = ia64_fetchadd4_rel(count, 1);
|
||||
if (unlikely(ret < 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and return 0 (failure)
|
||||
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
|
||||
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
|
||||
* Additionally, if the value was < 0 originally, this function must not leave
|
||||
* it to 0 on failure.
|
||||
*
|
||||
* If the architecture has no effective trylock variant, it should call the
|
||||
* <fail_fn> spinlock-based trylock variant unconditionally.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -547,7 +547,6 @@ ia64_eoi (void)
|
|||
}
|
||||
|
||||
#define cpu_relax() ia64_hint(ia64_hint_pause)
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
static inline int
|
||||
ia64_get_irr(unsigned int vector)
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -133,6 +133,5 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif /* _ASM_M32R_PROCESSOR_H */
|
||||
|
|
|
@ -20,7 +20,6 @@ generic-y += local64.h
|
|||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mman.h
|
||||
generic-y += mutex.h
|
||||
generic-y += percpu.h
|
||||
generic-y += preempt.h
|
||||
generic-y += resource.h
|
||||
|
|
|
@ -156,6 +156,5 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif
|
||||
|
|
|
@ -27,7 +27,6 @@ generic-y += local64.h
|
|||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += pci.h
|
||||
generic-y += percpu.h
|
||||
|
|
|
@ -152,7 +152,6 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
extern void setup_priv(void);
|
||||
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
#include <asm-generic/mutex-dec.h>
|
|
@ -22,7 +22,6 @@
|
|||
extern const struct seq_operations cpuinfo_op;
|
||||
|
||||
# define cpu_relax() barrier()
|
||||
# define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(tsk) \
|
||||
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
|
||||
|
|
|
@ -9,7 +9,6 @@ generic-y += irq_work.h
|
|||
generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mutex.h
|
||||
generic-y += parport.h
|
||||
generic-y += percpu.h
|
||||
generic-y += preempt.h
|
||||
|
|
|
@ -389,7 +389,6 @@ unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* Return_address is a replacement for __builtin_return_address(count)
|
||||
|
|
|
@ -1,16 +0,0 @@
|
|||
/* MN10300 Mutex fastpath
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public Licence
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the Licence, or (at your option) any later version.
|
||||
*
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
#include <asm-generic/mutex-null.h>
|
|
@ -69,7 +69,6 @@ extern void print_cpu_info(struct mn10300_cpuinfo *);
|
|||
extern void dodgy_tsc(void);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* User space process size: 1.75GB (default).
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
#include <asm-generic/mutex-dec.h>
|
|
@ -88,7 +88,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
|
|
@ -1,27 +0,0 @@
|
|||
/*
|
||||
* OpenRISC Linux
|
||||
*
|
||||
* Linux architectural port borrowing liberally from similar works of
|
||||
* others. All original copyrights apply as per the original source
|
||||
* declaration.
|
||||
*
|
||||
* OpenRISC implementation:
|
||||
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
|
||||
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
|
||||
* et al.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -92,7 +92,6 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
|
|||
#define init_stack (init_thread_union.stack)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ASM_OPENRISC_PROCESSOR_H */
|
||||
|
|
|
@ -16,7 +16,6 @@ generic-y += local.h
|
|||
generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += percpu.h
|
||||
generic-y += poll.h
|
||||
|
|
|
@ -309,7 +309,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* parisc_requires_coherency() is used to identify the combined VIPT/PIPT
|
||||
|
|
|
@ -1,132 +0,0 @@
|
|||
/*
|
||||
* Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
|
||||
*/
|
||||
#ifndef _ASM_POWERPC_MUTEX_H
|
||||
#define _ASM_POWERPC_MUTEX_H
|
||||
|
||||
static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
|
||||
{
|
||||
int t;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"1: lwarx %0,0,%1 # mutex trylock\n\
|
||||
cmpw 0,%0,%2\n\
|
||||
bne- 2f\n"
|
||||
PPC405_ERR77(0,%1)
|
||||
" stwcx. %3,0,%1\n\
|
||||
bne- 1b"
|
||||
PPC_ACQUIRE_BARRIER
|
||||
"\n\
|
||||
2:"
|
||||
: "=&r" (t)
|
||||
: "r" (&v->counter), "r" (old), "r" (new)
|
||||
: "cc", "memory");
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
static inline int __mutex_dec_return_lock(atomic_t *v)
|
||||
{
|
||||
int t;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1: lwarx %0,0,%1 # mutex lock\n\
|
||||
addic %0,%0,-1\n"
|
||||
PPC405_ERR77(0,%1)
|
||||
" stwcx. %0,0,%1\n\
|
||||
bne- 1b"
|
||||
PPC_ACQUIRE_BARRIER
|
||||
: "=&r" (t)
|
||||
: "r" (&v->counter)
|
||||
: "cc", "memory");
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
static inline int __mutex_inc_return_unlock(atomic_t *v)
|
||||
{
|
||||
int t;
|
||||
|
||||
__asm__ __volatile__(
|
||||
PPC_RELEASE_BARRIER
|
||||
"1: lwarx %0,0,%1 # mutex unlock\n\
|
||||
addic %0,%0,1\n"
|
||||
PPC405_ERR77(0,%1)
|
||||
" stwcx. %0,0,%1 \n\
|
||||
bne- 1b"
|
||||
: "=&r" (t)
|
||||
: "r" (&v->counter)
|
||||
: "cc", "memory");
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 1
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
|
||||
* it wasn't 1 originally. This function MUST leave the value lower than
|
||||
* 1 even when the "1" assertion wasn't true.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (unlikely(__mutex_dec_return_lock(count) < 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(__mutex_dec_return_lock(count) < 0))
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 0
|
||||
*
|
||||
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
|
||||
* In the failure case, this function is allowed to either set the value to
|
||||
* 1, or to set it to a value lower than 1.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (unlikely(__mutex_inc_return_unlock(count) <= 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to 0, and return 1 (success), or if the count
|
||||
* was not 1, then return 0 (failure).
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -404,8 +404,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
|
|||
#define cpu_relax() barrier()
|
||||
#endif
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Check that a certain kernel stack pointer is valid in task_struct p */
|
||||
int validate_sp(unsigned long sp, struct task_struct *p,
|
||||
unsigned long nbytes);
|
||||
|
|
|
@@ -52,6 +52,14 @@
#define SYNC_IO
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@@ -234,9 +234,10 @@ static inline unsigned short stap(void)
/*
 * Give up the time slice of the virtual PU.
 */
void cpu_relax(void);
#define cpu_relax_yield cpu_relax_yield
void cpu_relax_yield(void);

#define cpu_relax_lowlatency() barrier()
#define cpu_relax() barrier()

#define ECAG_CACHE_ATTRIBUTE 0
#define ECAG_CPU_ATTRIBUTE 1
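For reference, the split introduced here is roughly: cpu_relax() stays a cheap
"be polite in a tight loop" hint, while cpu_relax_yield() may actually give up
the time slice (via diagnose 0x44 on s390, as the hunks below show). A minimal
sketch of the generic fallback and a caller; the compiler-barrier stand-in and
wait_for_flag() are the editor's placeholders, not kernel code:

/* Stand-in for the kernel's cpu_relax(): a plain compiler barrier. */
#define cpu_relax()	__asm__ __volatile__("" ::: "memory")

/*
 * Generic fallback, as assumed from this series: architectures without a
 * real yield primitive simply map cpu_relax_yield() to cpu_relax().
 */
#ifndef cpu_relax_yield
#define cpu_relax_yield()	cpu_relax()
#endif

static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		cpu_relax_yield();	/* may yield the vCPU on s390 */
}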
@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
|
|||
return __sync_bool_compare_and_swap(lock, old, new);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_SMP
|
||||
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
|
||||
#else
|
||||
bool arch_vcpu_is_preempted(int cpu);
|
||||
#endif
|
||||
|
||||
#define vcpu_is_preempted arch_vcpu_is_preempted
|
||||
|
||||
/*
|
||||
* Simple spin lock operations. There are two variants, one clears IRQ's
|
||||
* on the local processor, one does not.
|
||||
|
|
|
@ -53,7 +53,7 @@ void s390_update_cpu_mhz(void)
|
|||
on_each_cpu(update_cpu_mhz, NULL, 0);
|
||||
}
|
||||
|
||||
void notrace cpu_relax(void)
|
||||
void notrace cpu_relax_yield(void)
|
||||
{
|
||||
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
|
||||
diag_stat_inc(DIAG_STAT_X044);
|
||||
|
@ -61,7 +61,7 @@ void notrace cpu_relax(void)
|
|||
}
|
||||
barrier();
|
||||
}
|
||||
EXPORT_SYMBOL(cpu_relax);
|
||||
EXPORT_SYMBOL(cpu_relax_yield);
|
||||
|
||||
/*
|
||||
* cpu_init - initializes state that is per-CPU.
|
||||
|
|
|
@ -368,10 +368,15 @@ int smp_find_processor_id(u16 address)
|
|||
return -1;
|
||||
}
|
||||
|
||||
int smp_vcpu_scheduled(int cpu)
|
||||
bool arch_vcpu_is_preempted(int cpu)
|
||||
{
|
||||
return pcpu_running(pcpu_devices + cpu);
|
||||
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
|
||||
return false;
|
||||
if (pcpu_running(pcpu_devices + cpu))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL(arch_vcpu_is_preempted);
|
||||
|
||||
void smp_yield_cpu(int cpu)
|
||||
{
|
||||
|
|
|
@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
|
|||
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
|
||||
}
|
||||
|
||||
static inline int cpu_is_preempted(int cpu)
|
||||
{
|
||||
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
|
||||
return 0;
|
||||
if (smp_vcpu_scheduled(cpu))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
void arch_spin_lock_wait(arch_spinlock_t *lp)
|
||||
{
|
||||
unsigned int cpu = SPINLOCK_LOCKVAL;
|
||||
|
@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
|
|||
continue;
|
||||
}
|
||||
/* First iteration: check if the lock owner is running. */
|
||||
if (first_diag && cpu_is_preempted(~owner)) {
|
||||
if (first_diag && arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
continue;
|
||||
|
@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
|
|||
* yield the CPU unconditionally. For LPAR rely on the
|
||||
* sense running status.
|
||||
*/
|
||||
if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
|
||||
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
}
|
||||
|
@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
|
|||
continue;
|
||||
}
|
||||
/* Check if the lock owner is running. */
|
||||
if (first_diag && cpu_is_preempted(~owner)) {
|
||||
if (first_diag && arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
continue;
|
||||
|
@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
|
|||
* yield the CPU unconditionally. For LPAR rely on the
|
||||
* sense running status.
|
||||
*/
|
||||
if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
|
||||
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
}
|
||||
|
@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
|
|||
owner = 0;
|
||||
while (1) {
|
||||
if (count-- <= 0) {
|
||||
if (owner && cpu_is_preempted(~owner))
|
||||
if (owner && arch_vcpu_is_preempted(~owner))
|
||||
smp_yield_cpu(~owner);
|
||||
count = spin_retry;
|
||||
}
|
||||
|
@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
|
|||
owner = 0;
|
||||
while (1) {
|
||||
if (count-- <= 0) {
|
||||
if (owner && cpu_is_preempted(~owner))
|
||||
if (owner && arch_vcpu_is_preempted(~owner))
|
||||
smp_yield_cpu(~owner);
|
||||
count = spin_retry;
|
||||
}
|
||||
|
@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
|
|||
owner = 0;
|
||||
while (1) {
|
||||
if (count-- <= 0) {
|
||||
if (owner && cpu_is_preempted(~owner))
|
||||
if (owner && arch_vcpu_is_preempted(~owner))
|
||||
smp_yield_cpu(~owner);
|
||||
count = spin_retry;
|
||||
}
|
||||
|
@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
|
|||
{
|
||||
if (!cpu)
|
||||
return;
|
||||
if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
|
||||
if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
|
||||
return;
|
||||
smp_yield_cpu(~cpu);
|
||||
}
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
#ifndef _ASM_SCORE_MUTEX_H
|
||||
#define _ASM_SCORE_MUTEX_H
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
|
||||
#endif /* _ASM_SCORE_MUTEX_H */
|
|
@ -24,7 +24,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
|||
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
#define release_thread(thread) do {} while (0)
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,109 +0,0 @@
|
|||
/*
|
||||
* arch/sh/include/asm/mutex-llsc.h
|
||||
*
|
||||
* SH-4A optimized mutex locking primitives
|
||||
*
|
||||
* Please look into asm-generic/mutex-xchg.h for a formal definition.
|
||||
*/
|
||||
#ifndef __ASM_SH_MUTEX_LLSC_H
|
||||
#define __ASM_SH_MUTEX_LLSC_H
|
||||
|
||||
/*
|
||||
* Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure.
|
||||
* with a bastardized atomic decrement (it is not a reliable atomic decrement
|
||||
* but it satisfies the defined semantics for our purpose, while being
|
||||
* smaller and faster than a real atomic decrement or atomic swap.
|
||||
* The idea is to attempt decrementing the lock value only once. If once
|
||||
* decremented it isn't zero, or if its store-back fails due to a dispute
|
||||
* on the exclusive store, we simply bail out immediately through the slow
|
||||
* path where the lock will be reattempted until it succeeds.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int __done, __res;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"movli.l @%2, %0 \n"
|
||||
"add #-1, %0 \n"
|
||||
"movco.l %0, @%2 \n"
|
||||
"movt %1 \n"
|
||||
: "=&z" (__res), "=&r" (__done)
|
||||
: "r" (&(count)->counter)
|
||||
: "t");
|
||||
|
||||
if (unlikely(!__done || __res != 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
static inline int
|
||||
__mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
int __done, __res;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"movli.l @%2, %0 \n"
|
||||
"add #-1, %0 \n"
|
||||
"movco.l %0, @%2 \n"
|
||||
"movt %1 \n"
|
||||
: "=&z" (__res), "=&r" (__done)
|
||||
: "r" (&(count)->counter)
|
||||
: "t");
|
||||
|
||||
if (unlikely(!__done || __res != 0))
|
||||
__res = -1;
|
||||
|
||||
return __res;
|
||||
}
|
||||
|
||||
static inline void
|
||||
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int __done, __res;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"movli.l @%2, %0 \n\t"
|
||||
"add #1, %0 \n\t"
|
||||
"movco.l %0, @%2 \n\t"
|
||||
"movt %1 \n\t"
|
||||
: "=&z" (__res), "=&r" (__done)
|
||||
: "r" (&(count)->counter)
|
||||
: "t");
|
||||
|
||||
if (unlikely(!__done || __res <= 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the unlock was done on a contended lock, or if the unlock simply fails
|
||||
* then the mutex remains locked.
|
||||
*/
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/*
|
||||
* For __mutex_fastpath_trylock we do an atomic decrement and check the
|
||||
* result and put it in the __res variable.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int __res, __orig;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"1: movli.l @%2, %0 \n\t"
|
||||
"dt %0 \n\t"
|
||||
"movco.l %0,@%2 \n\t"
|
||||
"bf 1b \n\t"
|
||||
"cmp/eq #0,%0 \n\t"
|
||||
"bt 2f \n\t"
|
||||
"mov #0, %1 \n\t"
|
||||
"bf 3f \n\t"
|
||||
"2: mov #1, %1 \n\t"
|
||||
"3: "
|
||||
: "=&z" (__orig), "=&r" (__res)
|
||||
: "r" (&count->counter)
|
||||
: "t");
|
||||
|
||||
return __res;
|
||||
}
|
||||
#endif /* __ASM_SH_MUTEX_LLSC_H */
|
|
@ -1,12 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
#if defined(CONFIG_CPU_SH4A)
|
||||
#include <asm/mutex-llsc.h>
|
||||
#else
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
#endif
|
|
@ -97,7 +97,6 @@ extern struct sh_cpuinfo cpu_data[];
|
|||
|
||||
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
void default_idle(void);
|
||||
void stop_this_cpu(void *);
|
||||
|
|
|
@ -15,7 +15,6 @@ generic-y += local64.h
|
|||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += module.h
|
||||
generic-y += mutex.h
|
||||
generic-y += preempt.h
|
||||
generic-y += rwsem.h
|
||||
generic-y += serial.h
|
||||
|
|
|
@ -119,7 +119,6 @@ extern struct task_struct *last_task_used_math;
|
|||
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
extern void (*sparc_idle)(void);
|
||||
|
||||
|
|
|
@ -216,7 +216,6 @@ unsigned long get_wchan(struct task_struct *task);
|
|||
"nop\n\t" \
|
||||
".previous" \
|
||||
::: "memory")
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Prefetch support. This is tuned for UltraSPARC-III and later.
|
||||
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
|
||||
|
|
|
@ -21,7 +21,6 @@ generic-y += local64.h
|
|||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += parport.h
|
||||
generic-y += poll.h
|
||||
|
|
|
@ -264,8 +264,6 @@ static inline void cpu_relax(void)
|
|||
barrier();
|
||||
}
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Info on this processor (see fs/proc/cpuinfo.c) */
|
||||
struct seq_operations;
|
||||
extern const struct seq_operations cpuinfo_op;
|
||||
|
|
|
@ -17,7 +17,6 @@ generic-y += irq_work.h
|
|||
generic-y += kdebug.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += pci.h
|
||||
generic-y += percpu.h
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
/*
|
||||
* linux/arch/unicore32/include/asm/mutex.h
|
||||
*
|
||||
* Code specific to PKUnity SoC and UniCore ISA
|
||||
*
|
||||
* Copyright (C) 2001-2010 GUAN Xue-tao
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* UniCore optimized mutex locking primitives
|
||||
*
|
||||
* Please look into asm-generic/mutex-xchg.h for a formal definition.
|
||||
*/
|
||||
#ifndef __UNICORE_MUTEX_H__
|
||||
#define __UNICORE_MUTEX_H__
|
||||
|
||||
# include <asm-generic/mutex-xchg.h>
|
||||
#endif
|
|
@ -71,7 +71,6 @@ extern void release_thread(struct task_struct *);
|
|||
unsigned long get_wchan(struct task_struct *p);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(p) \
|
||||
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
#ifdef CONFIG_X86_32
|
||||
# include <asm/mutex_32.h>
|
||||
#else
|
||||
# include <asm/mutex_64.h>
|
||||
#endif
|
|
@ -1,110 +0,0 @@
|
|||
/*
|
||||
* Assembly implementation of the mutex fastpath, based on atomic
|
||||
* decrement/increment.
|
||||
*
|
||||
* started by Ingo Molnar:
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
|
||||
*/
|
||||
#ifndef _ASM_X86_MUTEX_32_H
|
||||
#define _ASM_X86_MUTEX_32_H
|
||||
|
||||
#include <asm/alternative.h>
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
* @fn: function to call if the original value was not 1
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and call <fn> if it
|
||||
* wasn't 1 originally. This function MUST leave the value lower than 1
|
||||
* even when the "1" assertion wasn't true.
|
||||
*/
|
||||
#define __mutex_fastpath_lock(count, fail_fn) \
|
||||
do { \
|
||||
unsigned int dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, count); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
|
||||
" jns 1f \n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:\n" \
|
||||
: "=a" (dummy) \
|
||||
: "a" (count) \
|
||||
: "memory", "ecx", "edx"); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(atomic_dec_return(count) < 0))
|
||||
return -1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 0
|
||||
*
|
||||
* try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
|
||||
* In the failure case, this function is allowed to either set the value
|
||||
* to 1, or to set it to a value lower than 1.
|
||||
*
|
||||
* If the implementation sets it to a value of lower than 1, the
|
||||
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
|
||||
* to return 0 otherwise.
|
||||
*/
|
||||
#define __mutex_fastpath_unlock(count, fail_fn) \
|
||||
do { \
|
||||
unsigned int dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, count); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
|
||||
" jg 1f\n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:\n" \
|
||||
: "=a" (dummy) \
|
||||
: "a" (count) \
|
||||
: "memory", "ecx", "edx"); \
|
||||
} while (0)
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and return 0 (failure)
|
||||
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
|
||||
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
|
||||
* Additionally, if the value was < 0 originally, this function must not leave
|
||||
* it to 0 on failure.
|
||||
*/
|
||||
static inline int __mutex_fastpath_trylock(atomic_t *count,
|
||||
int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
/* cmpxchg because it never induces a false contention state. */
|
||||
if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_MUTEX_32_H */
|
|
@ -1,127 +0,0 @@
|
|||
/*
|
||||
* Assembly implementation of the mutex fastpath, based on atomic
|
||||
* decrement/increment.
|
||||
*
|
||||
* started by Ingo Molnar:
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
|
||||
*/
|
||||
#ifndef _ASM_X86_MUTEX_64_H
|
||||
#define _ASM_X86_MUTEX_64_H
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - decrement and call function if negative
|
||||
* @v: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the result is negative
|
||||
*
|
||||
* Atomically decrements @v and calls <fail_fn> if the result is negative.
|
||||
*/
|
||||
#ifdef CC_HAVE_ASM_GOTO
|
||||
static inline void __mutex_fastpath_lock(atomic_t *v,
|
||||
void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
asm_volatile_goto(LOCK_PREFIX " decl %0\n"
|
||||
" jns %l[exit]\n"
|
||||
: : "m" (v->counter)
|
||||
: "memory", "cc"
|
||||
: exit);
|
||||
fail_fn(v);
|
||||
exit:
|
||||
return;
|
||||
}
|
||||
#else
|
||||
#define __mutex_fastpath_lock(v, fail_fn) \
|
||||
do { \
|
||||
unsigned long dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, v); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
|
||||
" jns 1f \n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:" \
|
||||
: "=D" (dummy) \
|
||||
: "D" (v) \
|
||||
: "rax", "rsi", "rdx", "rcx", \
|
||||
"r8", "r9", "r10", "r11", "memory"); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(atomic_dec_return(count) < 0))
|
||||
return -1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - increment and call function if nonpositive
|
||||
* @v: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the result is nonpositive
|
||||
*
|
||||
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
|
||||
*/
|
||||
#ifdef CC_HAVE_ASM_GOTO
|
||||
static inline void __mutex_fastpath_unlock(atomic_t *v,
|
||||
void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
asm_volatile_goto(LOCK_PREFIX " incl %0\n"
|
||||
" jg %l[exit]\n"
|
||||
: : "m" (v->counter)
|
||||
: "memory", "cc"
|
||||
: exit);
|
||||
fail_fn(v);
|
||||
exit:
|
||||
return;
|
||||
}
|
||||
#else
|
||||
#define __mutex_fastpath_unlock(v, fail_fn) \
|
||||
do { \
|
||||
unsigned long dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, v); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
|
||||
" jg 1f\n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:" \
|
||||
: "=D" (dummy) \
|
||||
: "D" (v) \
|
||||
: "rax", "rsi", "rdx", "rcx", \
|
||||
"r8", "r9", "r10", "r11", "memory"); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
|
||||
* if it wasn't 1 originally. [the fallback function is never used on
|
||||
* x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
|
||||
*/
|
||||
static inline int __mutex_fastpath_trylock(atomic_t *count,
|
||||
int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_MUTEX_64_H */
|
|
@@ -678,6 +678,11 @@ static __always_inline void pv_kick(int cpu)
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(int cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
@ -310,6 +310,8 @@ struct pv_lock_ops {
|
|||
|
||||
void (*wait)(u8 *ptr, u8 val);
|
||||
void (*kick)(int cpu);
|
||||
|
||||
struct paravirt_callee_save vcpu_is_preempted;
|
||||
};
|
||||
|
||||
/* This contains all the paravirt structures: we get a convenient
|
||||
|
@ -508,6 +510,18 @@ int paravirt_disable_iospace(void);
|
|||
#define PVOP_TEST_NULL(op) ((void)op)
|
||||
#endif
|
||||
|
||||
#define PVOP_RETMASK(rettype) \
|
||||
({ unsigned long __mask = ~0UL; \
|
||||
switch (sizeof(rettype)) { \
|
||||
case 1: __mask = 0xffUL; break; \
|
||||
case 2: __mask = 0xffffUL; break; \
|
||||
case 4: __mask = 0xffffffffUL; break; \
|
||||
default: break; \
|
||||
} \
|
||||
__mask; \
|
||||
})
|
||||
|
||||
|
||||
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
|
||||
pre, post, ...) \
|
||||
({ \
|
||||
|
@ -535,7 +549,7 @@ int paravirt_disable_iospace(void);
|
|||
paravirt_clobber(clbr), \
|
||||
##__VA_ARGS__ \
|
||||
: "memory", "cc" extra_clbr); \
|
||||
__ret = (rettype)__eax; \
|
||||
__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
|
||||
} \
|
||||
__ret; \
|
||||
})
|
||||
|
|
|
@ -588,8 +588,6 @@ static __always_inline void cpu_relax(void)
|
|||
rep_nop();
|
||||
}
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Stop speculative execution and prefetching of modified code. */
|
||||
static inline void sync_core(void)
|
||||
{
|
||||
|
|
|
@@ -32,6 +32,12 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
@@ -45,7 +45,9 @@ struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
	__u8  preempted;
	__u8  u8_pad[3];
	__u32 pad[11];
};

#define KVM_STEAL_ALIGNMENT_BITS 5
@ -592,6 +592,14 @@ out:
|
|||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
__visible bool __kvm_vcpu_is_preempted(int cpu)
|
||||
{
|
||||
struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
|
||||
|
||||
return !!src->preempted;
|
||||
}
|
||||
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
|
||||
|
||||
/*
|
||||
* Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
|
||||
*/
|
||||
|
@ -608,6 +616,11 @@ void __init kvm_spinlock_init(void)
|
|||
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
|
||||
pv_lock_ops.wait = kvm_wait;
|
||||
pv_lock_ops.kick = kvm_kick_cpu;
|
||||
|
||||
if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
|
||||
pv_lock_ops.vcpu_is_preempted =
|
||||
PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
|
||||
}
|
||||
}
|
||||
|
||||
static __init int kvm_spinlock_init_jump(void)
|
||||
|
|
|
@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlock(struct qspinlock *lock)
|
|||
{
|
||||
native_queued_spin_unlock(lock);
|
||||
}
|
||||
|
||||
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
|
||||
|
||||
bool pv_is_native_spin_unlock(void)
|
||||
|
@ -21,12 +20,25 @@ bool pv_is_native_spin_unlock(void)
|
|||
__raw_callee_save___native_queued_spin_unlock;
|
||||
}
|
||||
|
||||
__visible bool __native_vcpu_is_preempted(int cpu)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
|
||||
|
||||
bool pv_is_native_vcpu_is_preempted(void)
|
||||
{
|
||||
return pv_lock_ops.vcpu_is_preempted.func ==
|
||||
__raw_callee_save___native_vcpu_is_preempted;
|
||||
}
|
||||
|
||||
struct pv_lock_ops pv_lock_ops = {
|
||||
#ifdef CONFIG_SMP
|
||||
.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
|
||||
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
|
||||
.wait = paravirt_nop,
|
||||
.kick = paravirt_nop,
|
||||
.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
|
||||
#endif /* SMP */
|
||||
};
|
||||
EXPORT_SYMBOL(pv_lock_ops);
|
||||
|
|
|
@ -12,6 +12,7 @@ DEF_NATIVE(pv_cpu_ops, clts, "clts");
|
|||
|
||||
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
|
||||
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
|
||||
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
|
||||
#endif
|
||||
|
||||
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
|
||||
|
@ -27,6 +28,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
|
|||
}
|
||||
|
||||
extern bool pv_is_native_spin_unlock(void);
|
||||
extern bool pv_is_native_vcpu_is_preempted(void);
|
||||
|
||||
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
||||
unsigned long addr, unsigned len)
|
||||
|
@ -56,9 +58,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
|||
end = end_pv_lock_ops_queued_spin_unlock;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
|
||||
case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
|
||||
if (pv_is_native_vcpu_is_preempted()) {
|
||||
start = start_pv_lock_ops_vcpu_is_preempted;
|
||||
end = end_pv_lock_ops_vcpu_is_preempted;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
#endif
|
||||
|
||||
default:
|
||||
patch_default:
|
||||
ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
|
||||
break;
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
|
|||
|
||||
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
|
||||
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
|
||||
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
|
||||
#endif
|
||||
|
||||
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
|
||||
|
@ -36,6 +37,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
|
|||
}
|
||||
|
||||
extern bool pv_is_native_spin_unlock(void);
|
||||
extern bool pv_is_native_vcpu_is_preempted(void);
|
||||
|
||||
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
||||
unsigned long addr, unsigned len)
|
||||
|
@ -68,9 +70,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
|||
end = end_pv_lock_ops_queued_spin_unlock;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
|
||||
case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
|
||||
if (pv_is_native_vcpu_is_preempted()) {
|
||||
start = start_pv_lock_ops_vcpu_is_preempted;
|
||||
end = end_pv_lock_ops_vcpu_is_preempted;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
#endif
|
||||
|
||||
default:
|
||||
patch_default:
|
||||
ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
|
||||
break;
|
||||
|
||||
|
|
|
@ -2071,6 +2071,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
|
|||
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
|
||||
return;
|
||||
|
||||
vcpu->arch.st.steal.preempted = 0;
|
||||
|
||||
if (vcpu->arch.st.steal.version & 1)
|
||||
vcpu->arch.st.steal.version += 1; /* first time write, random junk */
|
||||
|
||||
|
@ -2826,8 +2828,22 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
|
||||
}
|
||||
|
||||
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
|
||||
return;
|
||||
|
||||
vcpu->arch.st.steal.preempted = 1;
|
||||
|
||||
kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
|
||||
&vcpu->arch.st.steal.preempted,
|
||||
offsetof(struct kvm_steal_time, preempted),
|
||||
sizeof(vcpu->arch.st.steal.preempted));
|
||||
}
|
||||
|
||||
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_steal_time_set_preempted(vcpu);
|
||||
kvm_x86_ops->vcpu_put(vcpu);
|
||||
kvm_put_guest_fpu(vcpu);
|
||||
vcpu->arch.last_host_tsc = rdtsc();
|
||||
|
|
|
@ -26,7 +26,6 @@ static inline void rep_nop(void)
|
|||
}
|
||||
|
||||
#define cpu_relax() rep_nop()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(t) (&(t)->thread.regs)
|
||||
|
||||
|
|
|
@ -114,6 +114,7 @@ void xen_uninit_lock_cpu(int cpu)
|
|||
per_cpu(irq_name, cpu) = NULL;
|
||||
}
|
||||
|
||||
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
|
||||
|
||||
/*
|
||||
* Our init of PV spinlocks is split in two init functions due to us
|
||||
|
@ -137,6 +138,7 @@ void __init xen_init_spinlocks(void)
|
|||
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
|
||||
pv_lock_ops.wait = xen_qlock_wait;
|
||||
pv_lock_ops.kick = xen_qlock_kick;
|
||||
pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
|
@ -206,7 +206,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
|||
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Special register access. */
|
||||
|
||||
|
|
|
@ -723,7 +723,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
|
|||
if (busywait_stop(timeout_us, cpu))
|
||||
break;
|
||||
|
||||
cpu_relax_lowlatency();
|
||||
cpu_relax();
|
||||
} while (!need_resched());
|
||||
|
||||
return false;
|
||||
|
|
|
@@ -35,19 +35,6 @@
#include "i915_drv.h"
#include "i915_trace.h"

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

@@ -240,15 +227,20 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)

static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

		*unlock = false;
	} else
	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	return true;
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}

static unsigned long

@@ -18,33 +18,24 @@
#include "msm_drv.h"
#include "msm_gem.h"

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;
		*unlock = false;
	} else {
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	return true;
	BUG();
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{

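Both converted helpers report through *unlock whether the caller must drop struct_mutex afterwards. An illustrative caller-side pattern (example_shrinker_scan and the reclaim step are hypothetical, not driver code from this diff):

	static unsigned long example_shrinker_scan(struct drm_device *dev)
	{
		unsigned long freed = 0;
		bool unlock;

		if (!msm_gem_shrinker_lock(dev, &unlock))
			return SHRINK_STOP;		/* lock is busy elsewhere: back off */

		/* ... reclaim objects while struct_mutex is held ... */

		if (unlock)				/* only drop the lock if we took it here */
			mutex_unlock(&dev->struct_mutex);

		return freed;
	}
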
@@ -342,7 +342,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
	endtime = busy_clock() + vq->busyloop_timeout;
	while (vhost_can_busy_poll(vq->dev, endtime) &&
	       vhost_vq_avail_empty(vq->dev, vq))
		cpu_relax_lowlatency();
		cpu_relax();
	preempt_enable();
	r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
			      out_num, in_num, NULL, NULL);

@@ -533,7 +533,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
	while (vhost_can_busy_poll(&net->dev, endtime) &&
	       !sk_has_rx_data(sk) &&
	       vhost_vq_avail_empty(&net->dev, vq))
		cpu_relax_lowlatency();
		cpu_relax();

	preempt_enable();

@@ -1,88 +0,0 @@
/*
 * include/asm-generic/mutex-dec.h
 *
 * Generic implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 */
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return_acquire(count) < 0))
		fail_fn(count);
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return_acquire(count) < 0))
		return -1;
	return 0;
}

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value of lower than 1, then the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
 * to return 0 otherwise.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_inc_return_release(count) <= 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it to 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
		return 1;
	return 0;
}

#endif

@@ -1,19 +0,0 @@
/*
 * include/asm-generic/mutex-null.h
 *
 * Generic implementation of the mutex fastpath, based on NOP :-)
 *
 * This is used by the mutex-debugging infrastructure, but it can also
 * be used by architectures that (for whatever reason) want to use the
 * spinlock based slowpath.
 */
#ifndef _ASM_GENERIC_MUTEX_NULL_H
#define _ASM_GENERIC_MUTEX_NULL_H

#define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
#define __mutex_fastpath_lock_retval(count)		(-1)
#define __mutex_fastpath_unlock(count, fail_fn)		fail_fn(count)
#define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
#define __mutex_slowpath_needs_to_unlock()		1

#endif

@@ -1,120 +0,0 @@
/*
 * include/asm-generic/mutex-xchg.h
 *
 * Generic implementation of the mutex fastpath, based on xchg().
 *
 * NOTE: An xchg based implementation might be less optimal than an atomic
 *       decrement/increment based implementation. If your architecture
 *       has a reasonable atomic dec/inc then you should probably use
 *       asm-generic/mutex-dec.h instead, or you could open-code an
 *       optimized version in asm/mutex.h.
 */
#ifndef _ASM_GENERIC_MUTEX_XCHG_H
#define _ASM_GENERIC_MUTEX_XCHG_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg(count, 0) != 1))
		/*
		 * We failed to acquire the lock, so mark it contended
		 * to ensure that any waiting tasks are woken up by the
		 * unlock slow path.
		 */
		if (likely(atomic_xchg_acquire(count, -1) != 1))
			fail_fn(count);
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_xchg_acquire(count, 0) != 1))
		if (likely(atomic_xchg(count, -1) != 1))
			return -1;
	return 0;
}

/**
 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * try to promote the mutex from 0 to 1. if it wasn't 0, call <function>
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than one.
 * If the implementation sets it to a value of lower than one, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
 * to return 0 otherwise.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg_release(count, 1) != 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()	0

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: spinlock based trylock implementation
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it to 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int prev;

	if (atomic_read(count) != 1)
		return 0;

	prev = atomic_xchg_acquire(count, 0);
	if (unlikely(prev < 0)) {
		/*
		 * The lock was marked contended so we must restore that
		 * state. If while doing so we get back a prev value of 1
		 * then we just own it.
		 *
		 * [ In the rare case of the mutex going to 1, to 0, to -1
		 *   and then back to 0 in this few-instructions window,
		 *   this has the potential to trigger the slowpath for the
		 *   owner's unlock path needlessly, but that's not a problem
		 *   in practice. ]
		 */
		prev = atomic_xchg_acquire(count, prev);
		if (prev < 0)
			prev = 0;
	}

	return prev;
}

#endif

@@ -1,9 +0,0 @@
#ifndef __ASM_GENERIC_MUTEX_H
#define __ASM_GENERIC_MUTEX_H
/*
 * Pull in the generic implementation for the mutex fastpath,
 * which is a reasonable default on many architectures.
 */

#include <asm-generic/mutex-dec.h>
#endif /* __ASM_GENERIC_MUTEX_H */

@@ -645,6 +645,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
			unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);

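The new offset variant generalizes the existing cached write: writing the whole object from offset 0 is equivalent to kvm_write_guest_cached(), which is how the helper is wired up in this merge. A one-line sketch of that relationship (the _sketch name is illustrative):

	static int kvm_write_guest_cached_sketch(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
						 void *data, unsigned long len)
	{
		return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
	}
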
@@ -1,24 +0,0 @@
#ifndef __LINUX_MUTEX_DEBUG_H
#define __LINUX_MUTEX_DEBUG_H

#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/debug_locks.h>

/*
 * Mutexes - debugging helpers:
 */

#define __DEBUG_MUTEX_INITIALIZER(lockname)				\
	, .magic = &lockname

#define mutex_init(mutex)						\
do {									\
	static struct lock_class_key __key;				\
									\
	__mutex_init((mutex), #mutex, &__key);				\
} while (0)

extern void mutex_destroy(struct mutex *lock);

#endif

@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>

/*
 * Simple, straightforward mutexes with strict semantics:

@@ -48,16 +49,12 @@
 * locks and tasks (and only those tasks)
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	atomic_t		count;
	atomic_long_t		owner;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
	struct task_struct	*owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;
#endif

@@ -66,6 +63,11 @@ struct mutex {
#endif
};

static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
}

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:

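The rewritten mutex keeps the owning task_struct pointer in an atomic_long_t and reserves the two low bits of that word for state flags, which is why __mutex_owner() masks with ~0x03; the lock fastpath then reduces to a single cmpxchg of the owner word. A hedged sketch of the idea (the flag and function names below are illustrative, not the exact kernel internals):

	#define EXAMPLE_FLAG_WAITERS	0x01UL	/* wait_list is non-empty */
	#define EXAMPLE_FLAG_HANDOFF	0x02UL	/* hand the lock to the top waiter */

	static inline bool example_mutex_trylock_fast(struct mutex *lock)
	{
		unsigned long curr = (unsigned long)current;

		/* Acquire iff the lock is unowned and no flag bits are set. */
		return atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr) == 0UL;
	}
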
@@ -79,9 +81,20 @@ struct mutex_waiter {
};

#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>

#define __DEBUG_MUTEX_INITIALIZER(lockname)				\
	, .magic = &lockname

extern void mutex_destroy(struct mutex *lock);

#else

# define __DEBUG_MUTEX_INITIALIZER(lockname)

static inline void mutex_destroy(struct mutex *lock) {}

#endif

/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized

@@ -90,14 +103,12 @@ struct mutex_waiter {
 *
 * It is not allowed to initialize an already locked mutex.
 */
# define mutex_init(mutex) \
do { \
	static struct lock_class_key __key; \
 \
	__mutex_init((mutex), #mutex, &__key); \
#define mutex_init(mutex)						\
do {									\
	static struct lock_class_key __key;				\
									\
	__mutex_init((mutex), #mutex, &__key);				\
} while (0)
static inline void mutex_destroy(struct mutex *lock) {}
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)				\

@@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif

#define __MUTEX_INITIALIZER(lockname) \
	{ .count = ATOMIC_INIT(1) \
	{ .owner = ATOMIC_LONG_INIT(0) \
	, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
	, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
	__DEBUG_MUTEX_INITIALIZER(lockname) \

@@ -127,7 +138,7 @@ extern void __mutex_init(struct mutex *lock, const char *name,
 */
static inline int mutex_is_locked(struct mutex *lock)
{
	return atomic_read(&lock->count) != 1;
	/*
	 * XXX think about spin_is_locked
	 */
	return __mutex_owner(lock) != NULL;
}

/*

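The static initializer now seeds .owner instead of .count, but callers are unaffected; both initialization styles look the same as before this change (example_dev is hypothetical):

	static DEFINE_MUTEX(example_static_lock);	/* expands to __MUTEX_INITIALIZER() */

	struct example_dev {
		struct mutex io_lock;
	};

	static void example_dev_setup(struct example_dev *dev)
	{
		mutex_init(&dev->io_lock);	/* runtime init, picks up a lockdep class */
	}
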
@@ -175,4 +189,35 @@ extern void mutex_unlock(struct mutex *lock);

extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

/*
 * These values are chosen such that FAIL and SUCCESS match the
 * values of the regular mutex_trylock().
 */
enum mutex_trylock_recursive_enum {
	MUTEX_TRYLOCK_FAILED	= 0,
	MUTEX_TRYLOCK_SUCCESS	= 1,
	MUTEX_TRYLOCK_RECURSIVE,
};

/**
 * mutex_trylock_recursive - trylock variant that allows recursive locking
 * @lock: mutex to be locked
 *
 * This function should not be used, _ever_. It is purely for hysterical GEM
 * raisins, and once those are gone this will be removed.
 *
 * Returns:
 *  MUTEX_TRYLOCK_FAILED    - trylock failed,
 *  MUTEX_TRYLOCK_SUCCESS   - lock acquired,
 *  MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
 */
static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(__mutex_owner(lock) == current))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}

#endif /* __LINUX_MUTEX_H */

@@ -989,7 +989,7 @@ enum cpu_idle_type {
 * already in a wake queue, the wakeup will happen soon and the second
 * waker can just skip it.
 *
 * The WAKE_Q macro declares and initializes the list head.
 * The DEFINE_WAKE_Q macro declares and initializes the list head.
 * wake_up_q() does NOT reinitialize the list; it's expected to be
 * called near the end of a function, where the fact that the queue is
 * not used again will be easy to see by inspection.

@@ -1009,7 +1009,7 @@ struct wake_q_head {

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

#define WAKE_Q(name)					\
#define DEFINE_WAKE_Q(name)				\
	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }

extern void wake_q_add(struct wake_q_head *head,

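The rename only brings the macro in line with the usual DEFINE_* convention; the usage pattern is unchanged: queue wakeups while holding a lock, then issue them after dropping it. An illustrative sketch (example_queue and example_waiter are hypothetical types):

	static void example_wake_all(struct example_queue *q)
	{
		DEFINE_WAKE_Q(wake_q);			/* was: WAKE_Q(wake_q); */
		struct example_waiter *w;

		spin_lock(&q->lock);
		list_for_each_entry(w, &q->waiters, node)
			wake_q_add(&wake_q, w->task);	/* defer the actual wakeups */
		spin_unlock(&q->lock);

		wake_up_q(&wake_q);			/* wake everyone without holding q->lock */
	}
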
@@ -2444,6 +2444,10 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

/*
 * Do not use outside of architecture code which knows its limitations.
 *

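cpu_relax_yield() degenerates to cpu_relax() everywhere except on architectures such as s390 that can actually give up the physical CPU to the hypervisor; it is intended for long spin loops such as the stop_machine state machine. A simplified sketch of that kind of loop (the helper name is illustrative):

	static void example_wait_for_state(atomic_t *state, int wanted)
	{
		while (atomic_read(state) != wanted)
			cpu_relax_yield();	/* may yield the physical CPU on s390 */
	}
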
@@ -3508,6 +3512,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

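The mutex, rwsem and osq spin loops in this merge use the hook to stop spinning once the lock owner's vCPU has been scheduled out by the host. A condensed sketch in the spirit of mutex_spin_on_owner(), not the literal kernel code:

	static bool example_keep_spinning(struct task_struct *owner)
	{
		/*
		 * Stop spinning if the owner is not running, we need to
		 * reschedule, or the owner's vCPU was preempted by the
		 * host: no forward progress is likely until it runs again.
		 */
		if (!owner->on_cpu || need_resched())
			return false;

		return !vcpu_is_preempted(task_cpu(owner));
	}
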
@@ -120,7 +120,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
				   struct ww_class *ww_class)
{
	ctx->task = current;
	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
	ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
	ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
	ctx->ww_class = ww_class;

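The stamp only needs to be unique and monotonically increasing to order acquire contexts; it does not publish any other data, so the relaxed increment suffices. For context, an illustrative use of an acquire context (error handling elided; names prefixed example_ are hypothetical):

	static DEFINE_WW_CLASS(example_ww_class);

	static void example_touch_object(struct ww_mutex *lock)
	{
		struct ww_acquire_ctx ctx;

		ww_acquire_init(&ctx, &example_ww_class);	/* takes the (relaxed) stamp */
		ww_mutex_lock(lock, &ctx);			/* real code must handle -EDEADLK */

		/* ... modify the protected object ... */

		ww_mutex_unlock(lock);
		ww_acquire_fini(&ctx);
	}
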
@@ -967,7 +967,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);

@@ -1151,7 +1151,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
			msg_ptr = wait.msg;
		}
	} else {
		WAKE_Q(wake_q);
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

@@ -235,7 +235,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);

	expunge_all(msq, -EIDRM, &wake_q);
	ss_wakeup(msq, &wake_q, true);

@@ -397,7 +397,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		goto out_up;
	case IPC_SET:
	{
		WAKE_Q(wake_q);
		DEFINE_WAKE_Q(wake_q);

		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {

@@ -634,7 +634,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);

	ns = current->nsproxy->ipc_ns;

@@ -850,7 +850,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
	struct msg_queue *msq;
	struct ipc_namespace *ns;
	struct msg_msg *msg, *copy = NULL;
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);

	ns = current->nsproxy->ipc_ns;

@@ -225,7 +225,7 @@ config ARCH_SUPPORTS_ATOMIC_RMW

config MUTEX_SPIN_ON_OWNER
	def_bool y
	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
	depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW

config RWSEM_SPIN_ON_OWNER
	def_bool y

@@ -1298,7 +1298,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);
	bool deboost;
	int ret = 0;

@@ -1415,7 +1415,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

@@ -1469,7 +1469,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);

@@ -1708,7 +1708,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	WAKE_Q(wake_q);
	DEFINE_WAKE_Q(wake_q);

	if (requeue_pi) {
		/*

@@ -840,9 +840,9 @@ static struct lock_list *alloc_list_entry(void)
/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
static int add_lock_to_list(struct lock_class *this, struct list_head *head,
			    unsigned long ip, int distance,
			    struct stack_trace *trace)
{
	struct lock_list *entry;
	/*

@@ -1868,14 +1868,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
	ret = add_lock_to_list(hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, &trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
	ret = add_lock_to_list(hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, &trace);
	if (!ret)