block: fix the incorrect spin_lock_irq to spin_lock

The process already runs with interrupts disabled.
We should use spin_lock instead of spin_lock_irq; otherwise
spin_unlock_irq may re-enable interrupts at the wrong stage.

   Call Trace:
    _raw_spin_lock_irq+0x20/0x24
    blkcg_print_blkgs+0x4f/0xe0
    blkg_print_stat_bytes+0x44/0x50
    cgroup_seqfile_show+0x4c/0xb0
    kernfs_seq_show+0x21/0x30
    seq_read+0x14c/0x3f0
    kernfs_fop_read+0x35/0x190
    __vfs_read+0x18/0x40
    vfs_read+0x99/0x160
    ksys_read+0x61/0xe0
    __x64_sys_read+0x1a/0x20
    do_syscall_64+0x47/0x140
    entry_SYSCALL_64_after_hwframe+0x44/0xa9

Fixes: f2519e1ed9a16 ("blkcg: add per blkcg diskstats")
Signed-off-by: Haisu Wang <haisuwang@tencent.com>
Reviewed-by: Honglin Li <honglinli@tencent.com>
This commit is contained in:
Haisu Wang 2022-10-24 16:08:04 +08:00 committed by Jianping Liu
parent 904baaf92b
commit cee4b3596d
1 changed file with 4 additions and 4 deletions

View File

@@ -302,9 +302,9 @@ static void blkcg_dkstats_free(struct blkcg_dkstats *ds)
 {
 #ifdef CONFIG_SMP
 	if (!list_empty(&ds->alloc_node)) {
-		spin_lock_irq(&alloc_lock);
+		spin_lock(&alloc_lock);
 		list_del_init(&ds->alloc_node);
-		spin_unlock_irq(&alloc_lock);
+		spin_unlock(&alloc_lock);
 	}
 #endif
 	list_del_init(&ds->list_node);
@@ -320,7 +320,7 @@ static void blkcg_dkstats_free_all(struct request_queue *q)
 		struct blkcg *blkcg = blkg->blkcg;
 		struct blkcg_dkstats *ds, *ns;
 
-		spin_lock_irq(&blkcg->lock);
+		spin_lock(&blkcg->lock);
 		list_for_each_entry_safe(ds, ns, &blkcg->dkstats_list, list_node) {
 			if (part_to_disk(ds->part)->queue != q)
 				continue;
@@ -330,7 +330,7 @@ static void blkcg_dkstats_free_all(struct request_queue *q)
 			blkcg_dkstats_free(ds);
 		}
-		spin_unlock_irq(&blkcg->lock);
+		spin_unlock(&blkcg->lock);
 	}
 	spin_unlock_irq(&q->queue_lock);
 }