sched/deadline: Do not reclaim the whole CPU bandwidth

The original GRUB algorithm tends to reclaim 100% of the CPU time, and this
allows a CPU hog to starve non-deadline tasks.
To address this issue, allow the scheduler to reclaim only a
specified fraction of CPU time, stored in the new "bw_ratio"
field of the dl runqueue structure.

Tested-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Claudio Scordino <claudio@evidence.eu.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Link: http://lkml.kernel.org/r/1495138417-6203-6-git-send-email-luca.abeni@santannapisa.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 4da3abcefe (parent c52f14d384)
Author: Luca Abeni, 2017-05-18 22:13:32 +02:00
Committed by: Ingo Molnar
3 changed files, 30 insertions(+), 1 deletion(-)

kernel/sched/core.c

@@ -6759,6 +6759,16 @@ static int sched_dl_global_validate(void)
 	return ret;
 }
 
+void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
+{
+	if (global_rt_runtime() == RUNTIME_INF) {
+		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
+	} else {
+		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
+			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
+	}
+}
+
 static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
@@ -6784,6 +6794,7 @@ static void sched_dl_do_global(void)
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 		rcu_read_unlock_sched();
+		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
 	}
 }
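
To make the fixed-point encoding concrete, here is a minimal user-space sketch of the arithmetic init_dl_rq_bw_ratio() performs. It is not kernel code: the local to_ratio() is assumed to mirror the kernel helper's runtime/period scaling, and the default sched_rt_runtime_us/sched_rt_period_us values of 950000/1000000 (Umax = 0.95) are only an example.

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20
#define RATIO_SHIFT	8

/* Assumed to mirror the kernel's to_ratio(): runtime/period in 2^BW_SHIFT fixed point. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t rt_runtime = 950000;	/* example: default sched_rt_runtime_us */
	uint64_t rt_period  = 1000000;	/* example: default sched_rt_period_us  */

	/* Swapped arguments give period/runtime = 1/Umax in 2^BW_SHIFT fixed point... */
	uint64_t bw_ratio = to_ratio(rt_runtime, rt_period) >> (BW_SHIFT - RATIO_SHIFT);

	/* ...so bw_ratio is 2^RATIO_SHIFT / Umax: 256 / 0.95, truncated to 269. */
	printf("bw_ratio = %llu\n", (unsigned long long)bw_ratio);
	return 0;
}

Storing 1/Umax rather than Umax itself keeps grub_reclaim() a multiply-and-shift, with no division on the hot path.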

kernel/sched/deadline.c

@@ -268,6 +268,7 @@ void init_dl_rq(struct dl_rq *dl_rq)
 #endif
 
 	dl_rq->running_bw = 0;
+	init_dl_rq_bw_ratio(dl_rq);
 }
 
 #ifdef CONFIG_SMP
@@ -924,11 +925,20 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
  * Uact is the (per-runqueue) active utilization.
  * Since rq->dl.running_bw contains Uact * 2^BW_SHIFT, the result
  * has to be shifted right by BW_SHIFT.
+ * To reclaim only a fraction Umax of the CPU time, the
+ * runtime accounting rule is modified as
+ * "dq = -Uact / Umax dt"; since rq->dl.bw_ratio contains
+ * 2^RATIO_SHIFT / Umax, delta is multiplied by bw_ratio and shifted
+ * right by RATIO_SHIFT.
+ * Since delta is a 64 bit variable, to have an overflow its value
+ * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
+ * So, overflow is not an issue here.
  */
 u64 grub_reclaim(u64 delta, struct rq *rq)
 {
 	delta *= rq->dl.running_bw;
-	delta >>= BW_SHIFT;
+	delta *= rq->dl.bw_ratio;
+	delta >>= BW_SHIFT + RATIO_SHIFT;
 
 	return delta;
 }
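
The comment's accounting rule can be checked with a small stand-alone sketch (again not kernel code; the utilization values below are arbitrary examples):

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20
#define RATIO_SHIFT	8

/* Same multiply-and-shift as grub_reclaim(), on plain scalars instead of struct rq. */
static uint64_t reclaim(uint64_t delta, uint64_t running_bw, uint64_t bw_ratio)
{
	delta *= running_bw;			/* dq = -Uact dt ...      */
	delta *= bw_ratio;			/* ... scaled by 1/Umax   */
	return delta >> (BW_SHIFT + RATIO_SHIFT);
}

int main(void)
{
	uint64_t delta      = 1000000;			/* 1 ms of elapsed time, in ns  */
	uint64_t running_bw = (1 << BW_SHIFT) / 2;	/* Uact = 0.5                   */
	uint64_t bw_ratio   = 269;			/* ~2^RATIO_SHIFT / 0.95 (Umax) */

	/* Prints 525390: roughly delta * 0.5 / 0.95, modulo fixed-point truncation. */
	printf("charged = %llu ns\n",
	       (unsigned long long)reclaim(delta, running_bw, bw_ratio));
	return 0;
}

With running_bw and bw_ratio both small fixed-point values, delta would have to exceed 2^(64 - 20 - 8) ns, roughly 68 seconds, before the two multiplications could overflow 64 bits, which is the "more than 64 seconds" bound quoted in the comment.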

kernel/sched/sched.h

@@ -565,6 +565,12 @@ struct dl_rq {
 	 * task blocks
 	 */
 	u64 running_bw;
+
+	/*
+	 * Inverse of the fraction of CPU utilization that can be reclaimed
+	 * by the GRUB algorithm.
+	 */
+	u64 bw_ratio;
 };
 
 #ifdef CONFIG_SMP
@@ -1495,9 +1501,11 @@ extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
 
 #define BW_SHIFT	20
 #define BW_UNIT		(1 << BW_SHIFT)
+#define RATIO_SHIFT	8
 unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
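
For reference, a sketch of how the BW_SHIFT fixed point is used for per-task bandwidths (the to_ratio() formula here is an assumption based on its declaration above, and the 10 ms / 100 ms reservation is just an example):

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)

/* Assumed semantics of to_ratio(): runtime/period scaled by 2^BW_SHIFT. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* A 10 ms runtime every 100 ms period is a utilization of 0.1... */
	uint64_t task_bw = to_ratio(100000000ULL, 10000000ULL);

	/* ...stored as 0.1 * BW_UNIT = 104857, the contribution to running_bw while active. */
	printf("task_bw = %llu of BW_UNIT = %d\n", (unsigned long long)task_bw, BW_UNIT);
	return 0;
}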