[IA64] Fix race in mm-context wrap-around logic.
The patch below should fix a race which could cause stale TLB entries. Specifically, when two CPUs ended up racing for entrance to wrap_mmu_context(), the losing CPU would find that, by the time it acquired ctx.lock, mm->context already had a valid value, but it then failed to (re-)check the delayed TLB flushing logic and hence could end up using a context number while there were still stale entries in its TLB. The fix is to check for delayed TLB flushes only after mm->context is valid (non-zero). The patch also makes GCC v4.x happier by defining a non-volatile variant of mm_context_t called nv_mm_context_t.

Signed-off-by: David Mosberger-Tang <David.Mosberger@acm.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 7d69fa6266
commit badea125d7
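For readers reconstructing the fix from the hunks below, here is a simplified sketch of how get_mmu_context() reads after the patch. It is an illustration only, not the literal kernel source: it is re-indented, the context-allocation body (which falls between the hunks and is not part of this diff) is condensed into a comment, and CPU-mask housekeeping is omitted. The essential change is the ordering: the delayed-flush check now runs after mm->context is known to be valid, whereas before the patch it ran in activate_mm(), ahead of the lookup, which is exactly the window the race exploited.

/* Illustrative sketch only -- condensed from the patch below, not verbatim. */
static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;	/* one non-volatile snapshot */

	if (unlikely(!context)) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		/*
		 * Re-check under ia64_ctx.lock: a racing CPU may already have
		 * assigned mm->context, possibly after a wrap that flagged
		 * delayed TLB flushes for other CPUs.
		 */
		context = mm->context;
		if (!context) {
			/* ...allocate a fresh context number, wrapping if needed... */
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}

	/*
	 * Only now, with a valid context in hand, perform any delayed TLB
	 * flush.  A CPU that lost the race above therefore cannot return a
	 * context number while stale translations for it remain in its TLB.
	 */
	delayed_tlb_flush();

	return context;
}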
--- a/include/asm-ia64/mmu.h
+++ b/include/asm-ia64/mmu.h
@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number. We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number. We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -55,22 +55,27 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
+	nv_mm_context_t context = mm->context;
 
-	if (context)
-		return context;
-
+	if (unlikely(!context)) {
 	spin_lock_irqsave(&ia64_ctx.lock, flags);
 	{
 		/* re-check, now that we've got the lock: */
@@ -83,6 +88,13 @@ get_mmu_context (struct mm_struct *mm)
 		}
 	}
 	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	}
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
 
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.