From 39b6a429c30482c349f1bb3746470fe473cbdb0f Mon Sep 17 00:00:00 2001
From: Vincent Guittot
Date: Wed, 24 Feb 2021 14:30:07 +0100
Subject: [PATCH] sched/fair: Reduce the window for duplicated update

Update last_blocked_load_update_tick at the start of
update_blocked_averages() instead of at the end, to reduce the window
during which another CPU can start the same update. The timestamp is
now accessed with WRITE_ONCE()/READ_ONCE(), since other CPUs read it
locklessly in update_nohz_stats().

Signed-off-by: Vincent Guittot
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Reviewed-by: Valentin Schneider
Link: https://lkml.kernel.org/r/20210224133007.28644-8-vincent.guittot@linaro.org
---
 kernel/sched/fair.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e87e1b3bcdca..f1b55f9a085d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7852,16 +7852,20 @@ static inline bool others_have_blocked(struct rq *rq)
 	return false;
 }
 
+static inline void update_blocked_load_tick(struct rq *rq)
+{
+	WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
+}
+
 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
 {
-	rq->last_blocked_load_update_tick = jiffies;
-
 	if (!has_blocked)
 		rq->has_blocked_load = 0;
 }
 #else
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 static inline bool others_have_blocked(struct rq *rq) { return false; }
+static inline void update_blocked_load_tick(struct rq *rq) {}
 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 #endif
 
@@ -8022,6 +8026,7 @@ static void update_blocked_averages(int cpu)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
+	update_blocked_load_tick(rq);
 	update_rq_clock(rq);
 	decayed |= __update_blocked_others(rq, &done);
 
@@ -8363,7 +8368,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 		return true;
 
 	update_blocked_averages(cpu);
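
The change relies on a simple "stamp early, check locklessly" pattern:
record the timestamp at the start of the expensive update, under the
lock, so that concurrent observers who read the timestamp without the
lock already see the update as fresh and skip starting their own.
Below is a minimal user-space C sketch of that pattern. It is a stand-in
built on stated assumptions, not kernel code: C11 atomics replace
WRITE_ONCE()/READ_ONCE(), a pthread mutex replaces the rq lock, a
monotonic millisecond clock replaces jiffies, and every name in it
(do_update(), update_is_recent(), UPDATE_PERIOD_MS, and so on) is
hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical period: treat the value as fresh for this many milliseconds. */
#define UPDATE_PERIOD_MS	100

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for the rq lock */
static atomic_long last_update_ms;        /* stands in for last_blocked_load_update_tick */

/* Monotonic clock in milliseconds, standing in for jiffies. */
static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Lockless freshness check, in the spirit of update_nohz_stats(). */
static bool update_is_recent(void)
{
	long stamp = atomic_load_explicit(&last_update_ms, memory_order_relaxed);

	return now_ms() - stamp < UPDATE_PERIOD_MS;
}

/* The expensive update, in the spirit of update_blocked_averages(). */
static void do_update(void)
{
	pthread_mutex_lock(&update_lock);
	/*
	 * Stamp the timestamp *before* doing the work: threads that check
	 * it while the update is still in progress already see it as
	 * fresh and do not start a duplicate update of their own.
	 */
	atomic_store_explicit(&last_update_ms, now_ms(), memory_order_relaxed);

	/* ... expensive recomputation of blocked load would go here ... */

	pthread_mutex_unlock(&update_lock);
}

static void *worker(void *arg)
{
	(void)arg;

	if (!update_is_recent())
		do_update();
	return NULL;
}

int main(void)
{
	pthread_t threads[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	printf("last update stamped at %ld ms\n",
	       atomic_load_explicit(&last_update_ms, memory_order_relaxed));
	return 0;
}

In the sketch, relaxed ordering is enough because the timestamp is only
a throttle, not a synchronization point; the kernel gets the same effect
from the rq lock on the write side plus WRITE_ONCE()/READ_ONCE()
annotations for the lockless read.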