sched/preempt, mm/fault: Count pagefault_disable() levels in pagefault_disabled

Until now, pagefault_disable()/pagefault_enable() used the preempt
count to track whether we are in an environment with pagefaults disabled
(which can be queried via in_atomic()).

This patch introduces a separate counter in task_struct to track the
nesting level of pagefault_disable() calls. We keep manipulating the
preempt count to retain compatibility with existing pagefault handlers.

It is now possible to verify whether we are in a pagefault_disable()
environment by calling pagefault_disabled(). In contrast to in_atomic(),
it is not influenced by preempt_enable()/preempt_disable().
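
For illustration (this snippet is not part of the patch):
pagefault_disabled() sees only pagefault_disable(), while on a
preemptible kernel both calls make in_atomic() return true:

	preempt_disable();
	WARN_ON(pagefault_disabled());	/* counter untouched */
	preempt_enable();

	pagefault_disable();		/* raises both counters */
	WARN_ON(!pagefault_disabled());	/* user accesses won't sleep */
	pagefault_enable();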

This patch is based on a patch from Ingo Molnar.

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-2-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 8bcbde5480
parent 3e51f3c400
David Hildenbrand, 2015-05-11 17:52:06 +02:00; committed by Ingo Molnar
3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h

@@ -1788,6 +1788,7 @@ struct task_struct {
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long task_state_change;
 #endif
+	int pagefault_disabled;
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */

diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h

@@ -2,20 +2,36 @@
 #define __LINUX_UACCESS_H__
 
 #include <linux/preempt.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 
+static __always_inline void pagefault_disabled_inc(void)
+{
+	current->pagefault_disabled++;
+}
+
+static __always_inline void pagefault_disabled_dec(void)
+{
+	current->pagefault_disabled--;
+	WARN_ON(current->pagefault_disabled < 0);
+}
+
 /*
- * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
+ * These routines enable/disable the pagefault handler. If disabled, it will
+ * not take any locks and go straight to the fixup table.
+ *
+ * We increase the preempt and the pagefault count, to be able to distinguish
+ * whether we run in simple atomic context or in a real pagefault_disable()
+ * context.
+ *
+ * For now, after pagefault_disabled() has been called, we run in atomic
+ * context. User access methods will not sleep.
  *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
  */
 static inline void pagefault_disable(void)
 {
 	preempt_count_inc();
+	pagefault_disabled_inc();
 	/*
 	 * make sure to have issued the store before a pagefault
 	 * can hit.
@@ -25,18 +41,24 @@ static inline void pagefault_disable(void)
 
 static inline void pagefault_enable(void)
 {
-#ifndef CONFIG_PREEMPT
 	/*
 	 * make sure to issue those last loads/stores before enabling
 	 * the pagefault handler again.
 	 */
 	barrier();
+	pagefault_disabled_dec();
+#ifndef CONFIG_PREEMPT
 	preempt_count_dec();
 #else
 	preempt_enable();
 #endif
 }
 
+/*
+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
+ */
+#define pagefault_disabled() (current->pagefault_disabled != 0)
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,

diff --git a/kernel/fork.c b/kernel/fork.c

@@ -1393,6 +1393,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+
+	p->pagefault_disabled = 0;
+
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0;	/* no locks held yet */
 	p->curr_chain_key = 0;