percpu-rwsem: use synchronize_sched_expedited
Use synchronize_sched_expedited() instead of synchronize_sched() to improve mount speed.

This patch improves mount time from 0.500s to 0.013s for Jeff's test-case.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Reported-and-tested-by: Jeff Chua <jeff.chua.linux@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
e23739b4ad
commit
4b05a1c74d
|
@@ -13,7 +13,7 @@ struct percpu_rw_semaphore {
|
||||||
};
|
};
|
||||||
|
|
||||||
#define light_mb() barrier()
|
#define light_mb() barrier()
|
||||||
#define heavy_mb() synchronize_sched()
|
#define heavy_mb() synchronize_sched_expedited()
|
||||||
|
|
||||||
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
|
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
|
||||||
{
|
{
|
||||||
|
@@ -51,7 +51,7 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p)
|
||||||
{
|
{
|
||||||
mutex_lock(&p->mtx);
|
mutex_lock(&p->mtx);
|
||||||
p->locked = true;
|
p->locked = true;
|
||||||
synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
|
synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
|
||||||
while (__percpu_count(p->counters))
|
while (__percpu_count(p->counters))
|
||||||
msleep(1);
|
msleep(1);
|
||||||
heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
|
heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
|
||||||
|
|
Loading…
Reference in New Issue