soc: fsl: dpio: use the combined functions to protect critical zone
In the original code, the two separate calls spin_lock() and local_irq_save() are used to protect the critical section. But when the kernel lock debugging config is enabled, the inconsistent lock state below is detected (a minimal sketch of the unsafe window follows the splat):
================================
WARNING: inconsistent lock state
5.10.63-yocto-standard #1 Not tainted
--------------------------------
inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
lock_torture_wr/226 [HC0[0]:SC1[5]:HE1:SE0] takes:
ffff002005b2dd80 (&p->access_spinlock){+.?.}-{3:3}, at: qbman_swp_enqueue_multiple_mem_back+0x44/0x270
{SOFTIRQ-ON-W} state was registered at:
lock_acquire.part.0+0xf8/0x250
lock_acquire+0x68/0x84
_raw_spin_lock+0x68/0x90
qbman_swp_enqueue_multiple_mem_back+0x44/0x270
......
cryptomgr_test+0x38/0x60
kthread+0x158/0x164
ret_from_fork+0x10/0x38
irq event stamp: 4498
hardirqs last enabled at (4498): [<ffff800010fcf980>] _raw_spin_unlock_irqrestore+0x90/0xb0
hardirqs last disabled at (4497): [<ffff800010fcffc4>] _raw_spin_lock_irqsave+0xd4/0xe0
softirqs last enabled at (4458): [<ffff8000100108c4>] __do_softirq+0x674/0x724
softirqs last disabled at (4465): [<ffff80001005b2a4>] __irq_exit_rcu+0x190/0x19c
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&p->access_spinlock);
<Interrupt>
lock(&p->access_spinlock);
*** DEADLOCK ***
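To make the window concrete, here is a minimal sketch of the old locking order using hypothetical demo_* names (this is illustration only, not the dpio code): the lock is acquired while hard and soft IRQs are still enabled, so a softirq on the same CPU can try to take a lock its own CPU already holds.

  #include <linux/spinlock.h>
  #include <linux/interrupt.h>

  static DEFINE_SPINLOCK(demo_lock);

  /* Softirq-context user of the lock, like the enqueue path driven from
   * softirq context in the splat above. */
  static void demo_tasklet_fn(struct tasklet_struct *t)
  {
  	spin_lock(&demo_lock);		/* spins forever if this CPU already holds it */
  	/* ... enqueue work ... */
  	spin_unlock(&demo_lock);
  }
  static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn);

  /* Process-context path mirroring the old spin_lock() + local_irq_save() order. */
  static void demo_enqueue(void)
  {
  	unsigned long flags;

  	spin_lock(&demo_lock);	/* lock taken with IRQs/softirqs still enabled */
  	/* <-- a hardirq can land here; the softirq run on IRQ exit deadlocks */
  	local_irq_save(flags);
  	/* ... critical section ... */
  	local_irq_restore(flags);
  	spin_unlock(&demo_lock);
  }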
So, in order to avoid the deadlock, use the combined functions spin_lock_irqsave()/spin_unlock_irqrestore() to protect the critical section.
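Continuing the hypothetical sketch above, the combined helper masks local interrupts before taking the lock, so the softirq can never preempt the lock holder on this CPU:

  /* Fixed process-context path: IRQs are disabled before the lock is
   * acquired, closing the window the splat complains about. */
  static void demo_enqueue_fixed(void)
  {
  	unsigned long flags;

  	spin_lock_irqsave(&demo_lock, flags);
  	/* ... critical section ... */
  	spin_unlock_irqrestore(&demo_lock, flags);
  }

The patch below applies exactly this transformation to s->access_spinlock in qbman_swp_enqueue_multiple_mem_back().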
Fixes: 3b2abda7d2 ("soc: fsl: dpio: Replace QMAN array mode with ring mode enqueue")
Cc: stable@vger.kernel.org
Signed-off-by: Meng Li <Meng.Li@windriver.com>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
commit dc7e5940aa
parent e775eb9fc2
@@ -732,8 +732,7 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
 	int i, num_enqueued = 0;
 	unsigned long irq_flags;
 
-	spin_lock(&s->access_spinlock);
-	local_irq_save(irq_flags);
+	spin_lock_irqsave(&s->access_spinlock, irq_flags);
 
 	half_mask = (s->eqcr.pi_ci_mask>>1);
 	full_mask = s->eqcr.pi_ci_mask;
@@ -744,8 +743,7 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
 		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 					eqcr_ci, s->eqcr.ci);
 		if (!s->eqcr.available) {
-			local_irq_restore(irq_flags);
-			spin_unlock(&s->access_spinlock);
+			spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
 			return 0;
 		}
 	}
@@ -784,8 +782,7 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
 	dma_wmb();
 	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
 			     (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
-	local_irq_restore(irq_flags);
-	spin_unlock(&s->access_spinlock);
+	spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
 
 	return num_enqueued;
 }