sched: Move rt_period/runtime sysctls to rt.c

Move the rt_period/runtime sysctls to rt.c and use the new
register_sysctl_init() helper to register the sysctl interface.

Signed-off-by: Zhen Ni <nizhen@uniontech.com>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
This commit is contained in:
Zhen Ni 2022-02-15 19:45:59 +08:00 committed by Luis Chamberlain
parent f5ef06d58b
commit d9ab0e63fa
6 changed files with 48 additions and 39 deletions

View File

@ -31,15 +31,6 @@ extern int sysctl_numa_balancing_mode;
#define sysctl_numa_balancing_mode 0 #define sysctl_numa_balancing_mode 0
#endif #endif
/*
* control realtime throttling:
*
* /proc/sys/kernel/sched_rt_period_us
* /proc/sys/kernel/sched_rt_runtime_us
*/
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
extern unsigned int sysctl_sched_dl_period_max; extern unsigned int sysctl_sched_dl_period_max;
extern unsigned int sysctl_sched_dl_period_min; extern unsigned int sysctl_sched_dl_period_min;
@ -58,8 +49,6 @@ extern int sched_rr_timeslice;
int sched_rr_handler(struct ctl_table *table, int write, void *buffer, int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos); size_t *lenp, loff_t *ppos);
int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos); void *buffer, size_t *lenp, loff_t *ppos);
int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer, int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer,

View File

@ -23,6 +23,8 @@
#define RCU_SEQ_CTR_SHIFT 2 #define RCU_SEQ_CTR_SHIFT 2
#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1) #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
extern int sysctl_sched_rt_runtime;
/* /*
* Return the counter portion of a sequence number previously returned * Return the counter portion of a sequence number previously returned
* by rcu_seq_snap() or rcu_seq_current(). * by rcu_seq_snap() or rcu_seq_current().

View File

@ -145,12 +145,6 @@ const_debug unsigned int sysctl_sched_nr_migrate = 8;
const_debug unsigned int sysctl_sched_nr_migrate = 32; const_debug unsigned int sysctl_sched_nr_migrate = 32;
#endif #endif
/*
* period over which we measure -rt task CPU usage in us.
* default: 1s
*/
unsigned int sysctl_sched_rt_period = 1000000;
__read_mostly int scheduler_running; __read_mostly int scheduler_running;
#ifdef CONFIG_SCHED_CORE #ifdef CONFIG_SCHED_CORE
@ -444,13 +438,6 @@ sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
#endif /* CONFIG_SCHED_CORE */ #endif /* CONFIG_SCHED_CORE */
/*
* part of the period that we allow rt tasks to run in us.
* default: 0.95s
*/
int sysctl_sched_rt_runtime = 950000;
/* /*
* Serialization rules: * Serialization rules:
* *

View File

@ -13,6 +13,47 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
struct rt_bandwidth def_rt_bandwidth; struct rt_bandwidth def_rt_bandwidth;
/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* Shared proc handler for both rt sysctls below; defined later in this file. */
static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos);

#ifdef CONFIG_SYSCTL
/*
 * sysctl table for /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us, registered under "kernel" below.
 * Both entries funnel through sched_rt_handler().
 */
static struct ctl_table sched_rt_sysctls[] = {
	{
		.procname       = "sched_rt_period_us",
		.data           = &sysctl_sched_rt_period,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler   = sched_rt_handler,
	},
	{
		.procname       = "sched_rt_runtime_us",
		.data           = &sysctl_sched_rt_runtime,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = sched_rt_handler,
	},
	{}	/* empty entry terminates the table */
};

/* Register the rt sysctl table; runs once at late_initcall time. */
static int __init sched_rt_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_rt_sysctls);
	return 0;
}
late_initcall(sched_rt_sysctl_init);
#endif /* CONFIG_SYSCTL */
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{ {
struct rt_bandwidth *rt_b = struct rt_bandwidth *rt_b =
@ -2925,7 +2966,7 @@ static void sched_rt_do_global(void)
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
} }
int sched_rt_handler(struct ctl_table *table, int write, void *buffer, static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos) size_t *lenp, loff_t *ppos)
{ {
int old_period, old_runtime; int old_period, old_runtime;

View File

@ -114,6 +114,10 @@ extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust); extern long calc_load_fold_active(struct rq *this_rq, long adjust);
extern void call_trace_sched_update_nr_running(struct rq *rq, int count); extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
/* /*
* Helpers for converting nanosecond timing to jiffy resolution * Helpers for converting nanosecond timing to jiffy resolution
*/ */

View File

@ -1681,20 +1681,6 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_FOUR, .extra2 = SYSCTL_FOUR,
}, },
#endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_NUMA_BALANCING */
{
.procname = "sched_rt_period_us",
.data = &sysctl_sched_rt_period,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_rt_handler,
},
{
.procname = "sched_rt_runtime_us",
.data = &sysctl_sched_rt_runtime,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = sched_rt_handler,
},
{ {
.procname = "sched_deadline_period_max_us", .procname = "sched_deadline_period_max_us",
.data = &sysctl_sched_dl_period_max, .data = &sysctl_sched_dl_period_max,