Revert "blk-cgroup: Flush stats at blkgs destruction path"

This reverts commit dae590a6c9.

We've had a few reports of this causing a crash at boot time, because
of a reference issue. While this problem seemingly did exist before
the patch and needs solving separately, this patch makes it a lot
easier to trigger.

Link: https://lore.kernel.org/linux-block/CA+QYu4oxiRKC6hJ7F27whXy-PRBx=Tvb+-7TQTONN8qTtV3aDA@mail.gmail.com/
Link: https://lore.kernel.org/linux-block/69af7ccb-6901-c84c-0e95-5682ccfb750c@acm.org/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Jens Axboe 2022-11-30 08:25:46 -07:00
parent 8d283ee62b
commit c62256dda3
3 changed files with 1 additions and 35 deletions

View File

@ -1084,12 +1084,10 @@ struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
*/
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
int cpu;
might_sleep();
css_get(&blkcg->css);
spin_lock_irq(&blkcg->lock);
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
struct blkcg_gq, blkcg_node);
@ -1112,17 +1110,6 @@ static void blkcg_destroy_blkgs(struct blkcg *blkcg)
}
spin_unlock_irq(&blkcg->lock);
/*
* Flush all the non-empty percpu lockless lists.
*/
for_each_possible_cpu(cpu) {
struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
if (!llist_empty(lhead))
cgroup_rstat_css_cpu_flush(&blkcg->css, cpu);
}
css_put(&blkcg->css);
}
/**

View File

@ -766,7 +766,6 @@ void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
void cgroup_rstat_css_cpu_flush(struct cgroup_subsys_state *css, int cpu);
/*
* Basic resource stats.

View File

@ -281,26 +281,6 @@ void cgroup_rstat_flush_release(void)
spin_unlock_irq(&cgroup_rstat_lock);
}
/**
* cgroup_rstat_css_cpu_flush - flush stats for the given css and cpu
* @css: target css to be flush
* @cpu: the cpu that holds the stats to be flush
*
* A lightweight rstat flush operation for a given css and cpu.
* Only the cpu_lock is being held for mutual exclusion, the cgroup_rstat_lock
* isn't used.
*/
void cgroup_rstat_css_cpu_flush(struct cgroup_subsys_state *css, int cpu)
{
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
raw_spin_lock_irq(cpu_lock);
rcu_read_lock();
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
raw_spin_unlock_irq(cpu_lock);
}
int cgroup_rstat_init(struct cgroup *cgrp)
{
int cpu;