cgroupfs: quota aware support

Upstream: no

Add quota awareness to the per-container cpuinfo and stat views: when
enabled, the number of CPUs shown to a container is capped by its CFS
bandwidth quota (DIV_ROUND_UP(quota, period)) instead of covering every
CPU in its cpuset.
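
For example, a group with cpu.cfs_quota_us = 150000 and
cpu.cfs_period_us = 100000 is presented as having
DIV_ROUND_UP(150000, 100000) = 2 CPUs.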

Signed-off-by: caelli <caelli@tencent.com>
Reviewed-by: Peng Hao <flyingpeng@tencent.com>
Reviewed-by: Bin Lai <robinlai@tencent.com>
Signed-off-by: katrinzhou <katrinzhou@tencent.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
Author: caelli <caelli@tencent.com>
Date: 2023-11-20 15:57:28 +08:00
Committer: Kairui Song
Commit: 042f6cf855 (parent: 660e8c3de3)
4 changed files with 95 additions and 10 deletions

@@ -3035,9 +3035,9 @@ static u64 get_iowait_time(int cpu)
}
static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpuset *cs)
static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpuset *cs, int max_cpu)
{
int i, j;
int i, j, k = 0;
u64 user, nice, system, idle, iowait, irq, softirq, steal;
u64 guest, guest_nice, n_ctx_switch, n_process, n_running, n_blocked;
u64 sum = 0;
@@ -3055,10 +3055,13 @@ static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpu
is_top_cgrp = !cs->css.parent ? true : false;
for_each_cpu(i, cs->cpus_allowed) {
struct kernel_cpustat kcs;
if (++k > max_cpu)
break;
user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
struct kernel_cpustat kcs;
kcpustat_cpu_fetch(&kcs, i);
idle += get_idle_time(&kcs, i);
iowait += get_iowait_time(i);
@@ -3092,12 +3095,16 @@ static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpu
seq_putc(sf, '\n');
j = 0;
k = 0;
for_each_cpu(i, cs->cpus_allowed) {
struct kernel_cpustat kcs;
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
if (++k > max_cpu)
break;
user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
struct kernel_cpustat kcs;
kcpustat_cpu_fetch(&kcs, i);
idle += get_idle_time(&kcs, i);
iowait = get_iowait_time(i);
@@ -3125,10 +3132,14 @@ static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpu
seq_put_decimal_ull(sf, "intr ", (unsigned long long)sum);
/* sum again ? it could be updated? */
k = 0;
for_each_irq_nr(j) {
sum = 0;
for_each_cpu(i, cs->cpus_allowed)
for_each_cpu(i, cs->cpus_allowed) {
if (++k > max_cpu)
break;
sum += kstat_irqs_cpu(j, i);
}
seq_put_decimal_ull(sf, " ", sum);
}
@@ -3137,7 +3148,10 @@ static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpu
n_process = 0;
n_running = 0;
n_blocked = 0;
k = 0;
for_each_cpu(i, cs->cpus_allowed) {
if (++k > max_cpu)
break;
n_ctx_switch += nr_context_switches_cpu(i);
n_process += per_cpu(total_forks, i);
n_running += nr_running_cpu(i);
@@ -3167,7 +3181,7 @@ static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpu
static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v)
{
struct cpuset *cs = css_cs(seq_css(sf));
return cpuset_cgroup_stat_show_comm(sf, v, cs);
return cpuset_cgroup_stat_show_comm(sf, v, cs, INT_MAX);
}
#ifdef CONFIG_X86
@@ -3218,7 +3232,7 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
}
#endif
static int cpuset_cgroup_cpuinfo_show_comm(struct seq_file *sf, void *v, struct cpuset *cs)
static int cpuset_cgroup_cpuinfo_show_comm(struct seq_file *sf, void *v, struct cpuset *cs, int max_cpu)
{
int i, j, k = 0;
struct cpuinfo_x86 *c;
@@ -3237,6 +3251,8 @@ static int cpuset_cgroup_cpuinfo_show_comm(struct seq_file *sf, void *v, struct
else
cpu = k;
k++;
if (k > max_cpu)
break;
seq_printf(sf, "processor\t: %u\n"
"vendor_id\t: %s\n"
"cpu family\t: %d\n"
@@ -3318,7 +3334,7 @@ static int cpuset_cgroup_cpuinfo_show_comm(struct seq_file *sf, void *v, struct
static int cpuset_cgroup_cpuinfo_show(struct seq_file *sf, void *v)
{
struct cpuset *cs = css_cs(seq_css(sf));
return cpuset_cgroup_cpuinfo_show_comm(sf, v, cs);
return cpuset_cgroup_cpuinfo_show_comm(sf, v, cs, INT_MAX);
}
#endif
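
Both show paths apply the same capping pattern; a distilled sketch of it
(simplified from the hunks above, not the literal kernel code):

    int i, k = 0;

    for_each_cpu(i, cs->cpus_allowed) {
        /* Stop once the quota-derived CPU budget is exhausted. */
        if (++k > max_cpu)
            break;
        /* ... accumulate or print the per-CPU data for CPU i ... */
    }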

@@ -10551,6 +10551,33 @@ static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
return css ? container_of(css, struct task_group, css) : NULL;
}
#ifdef CONFIG_CGROUPFS
int container_cpuquota_aware;
#define cpu_quota_aware_enabled(tg) \
(tg && tg != &root_task_group && tg->cpuquota_aware)
int cpu_get_max_cpus(struct task_struct *p)
{
int max_cpus = INT_MAX;
struct task_group *tg = task_group(p);
if (!cpu_quota_aware_enabled(tg))
return max_cpus;
if (tg->cfs_bandwidth.quota == RUNTIME_INF)
return max_cpus;
max_cpus = DIV_ROUND_UP(tg->cfs_bandwidth.quota, tg->cfs_bandwidth.period);
return max_cpus;
}
#else /* CONFIG_CGROUPFS */
int cpu_get_max_cpus(struct task_struct *p)
{
return INT_MAX;
}
#endif /* CONFIG_CGROUPFS */
static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
@@ -10565,7 +10592,9 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
tg = sched_create_group(parent);
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
#ifdef CONFIG_CGROUPFS
tg->cpuquota_aware = container_cpuquota_aware;
#endif
return &tg->css;
}
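
The quota-aware callers of cpu_get_max_cpus() live in the cgroupfs show
paths and are outside this diff; the existing cpuset cgroup files keep
passing INT_MAX. A minimal sketch of the intended use, where
show_container_stat() is a hypothetical wrapper:

    /* Hypothetical wrapper: report only as many CPUs as the reading
     * task's group is entitled to by its CFS quota. */
    static int show_container_stat(struct seq_file *sf, void *v)
    {
        struct cpuset *cs = css_cs(seq_css(sf));
        int max_cpu = cpu_get_max_cpus(current);

        return cpuset_cgroup_stat_show_comm(sf, v, cs, max_cpu);
    }
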
@@ -11214,7 +11243,33 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
}
#endif
#ifdef CONFIG_CGROUPFS
static u64 cpu_quota_aware_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct task_group *tg = css_tg(css);
return tg->cpuquota_aware;
}
static int cpu_quota_aware_write_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
struct task_group *tg = css_tg(css);
tg->cpuquota_aware = !!val;
return 0;
}
#endif
static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_CGROUPFS
{
.name = "quota_aware",
.flags = CFTYPE_NOT_ON_ROOT,
.read_u64 = cpu_quota_aware_read_u64,
.write_u64 = cpu_quota_aware_write_u64,
},
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
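
From userspace the new knob is an ordinary cgroup-v1 control file
(cpu.quota_aware, absent on the root group). A hedged sketch of enabling
it for one group; the mount point and group name are assumptions:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        /* Path is illustrative; it depends on where the cpu controller
         * is mounted and on the container's cgroup name. */
        int fd = open("/sys/fs/cgroup/cpu/mycontainer/cpu.quota_aware",
                      O_WRONLY);

        if (fd < 0)
            return 1;
        /* Any non-zero value enables quota awareness for this group. */
        if (write(fd, "1", 1) != 1) {
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }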

@@ -409,7 +409,9 @@ struct task_group {
#ifdef CONFIG_SCHED_AUTOGROUP
struct autogroup *autogroup;
#endif
#ifdef CONFIG_CGROUPFS
u64 cpuquota_aware;
#endif
struct cfs_bandwidth cfs_bandwidth;
#ifdef CONFIG_UCLAMP_TASK_GROUP

@@ -134,6 +134,9 @@ static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT;
defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
int sysctl_legacy_va_layout;
#endif
#ifdef CONFIG_CGROUPFS
extern int container_cpuquota_aware;
#endif
#endif /* CONFIG_SYSCTL */
@@ -2104,6 +2107,15 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_CGROUPFS
{
.procname = "container_cpuquota_aware",
.data = &container_cpuquota_aware,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
{ }
};
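
The sysctl only sets the default that cpu_cgroup_css_alloc() copies into
newly created task groups; existing groups keep their own cpu.quota_aware
setting. Since the entry is registered in kern_table, it should appear as
/proc/sys/kernel/container_cpuquota_aware and can be flipped the same way:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/proc/sys/kernel/container_cpuquota_aware", O_WRONLY);

        if (fd < 0)
            return 1;
        /* Task groups created after this point start out quota aware. */
        if (write(fd, "1", 1) != 1) {
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }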