Add new 'cond_resched_bkl()' helper function
It acts exactly like a regular 'cond_resched()', but will not get
optimized away when CONFIG_PREEMPT is set.
Normal kernel code is already preemptable in the presence of
CONFIG_PREEMPT, so cond_resched() is optimized away (see commit
02b67cc3ba "sched: do not do cond_resched() when CONFIG_PREEMPT").
But when wanting to conditionally reschedule while holding a lock, you
need to use "cond_resched_lock(lock)", and the new function is the BKL
equivalent of that.
Also make fs/locks.c use it.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9662369786
commit c3921ab715
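For reference only (this note and the sketch below are not part of the commit): a minimal example of how a caller would choose between the two helpers. The names walk_list_under_bkl(), walk_list_under_spinlock(), example_lock, more_work_to_do() and do_one_item() are made up for illustration; cond_resched_bkl(), cond_resched_lock(), lock_kernel() and unlock_kernel() are the real interfaces being discussed.

#include <linux/sched.h>	/* cond_resched_bkl(), cond_resched_lock() */
#include <linux/smp_lock.h>	/* lock_kernel(), unlock_kernel() */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical spinlock */

static int more_work_to_do(void) { return 0; }	/* hypothetical stub */
static void do_one_item(void) { }		/* hypothetical stub */

/* Long-running work done while holding the BKL. */
static void walk_list_under_bkl(void)
{
	lock_kernel();
	while (more_work_to_do()) {
		do_one_item();
		/*
		 * Plain cond_resched() would compile to a no-op under
		 * CONFIG_PREEMPT; cond_resched_bkl() still offers to
		 * reschedule here.
		 */
		cond_resched_bkl();
	}
	unlock_kernel();
}

/* The spinlock analogue: drop and retake the lock if a reschedule is due. */
static void walk_list_under_spinlock(void)
{
	spin_lock(&example_lock);
	while (more_work_to_do()) {
		do_one_item();
		cond_resched_lock(&example_lock);
	}
	spin_unlock(&example_lock);
}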
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -773,7 +773,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	 * give it the opportunity to lock the file.
 	 */
 	if (found)
-		cond_resched();
+		cond_resched_bkl();
 
 find_conflict:
 	for_each_lock(inode, before) {
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2037,13 +2037,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
+extern int _cond_resched(void);
 #ifdef CONFIG_PREEMPT
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2051,6 +2051,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5525,7 +5525,6 @@ static void __cond_resched(void)
 	} while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
 	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5536,7 +5535,6 @@ int __sched _cond_resched(void)
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,