scsi: bnx2fc: Plug CPU hotplug race
bnx2fc_process_new_cqes() has protection against CPU hotplug, which relies on the per cpu thread pointer. This protection is racy because it happens only partially with the per cpu fp_work_lock held. If the CPU is unplugged after the lock is dropped, the wakeup code can dereference a NULL pointer or access freed and potentially reused memory.

Restructure the code so the thread check and wakeup happen with the fp_work_lock held.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Chad Dupuis <chad.dupuis@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 2c67521821
commit 8addebc14a
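The race window is easiest to see outside the driver. Below is a minimal userspace model of the two orderings (an illustrative sketch using pthreads; percpu_ctx, io_worker, wake_worker and the queue_* helpers are invented for this example and are not bnx2fc code):

/* Simplified model of the race; not the driver code. */
#include <pthread.h>
#include <stddef.h>

struct io_worker {
	pthread_cond_t wake;
};

struct percpu_ctx {
	pthread_mutex_t lock;		/* stands in for fp_work_lock */
	struct io_worker *iothread;	/* cleared and freed on "CPU unplug" */
};

static void wake_worker(struct io_worker *w)
{
	pthread_cond_signal(&w->wake);	/* dereferences w */
}

/* Old shape: the thread pointer is checked under the lock but used
 * only after the lock has been dropped. */
static void queue_racy(struct percpu_ctx *ctx)
{
	int have_thread;

	pthread_mutex_lock(&ctx->lock);
	have_thread = ctx->iothread != NULL;
	/* ... work would be queued on the per-CPU list here ... */
	pthread_mutex_unlock(&ctx->lock);

	/* Window: a concurrent unplug can clear and free ctx->iothread
	 * right here, so the wakeup below may hit NULL or freed memory. */
	if (have_thread && ctx->iothread)
		wake_worker(ctx->iothread);
}

/* Fixed shape, mirroring bnx2fc_pending_work(): check, queueing and
 * wakeup all happen while the lock is held. */
static void queue_safe(struct percpu_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (ctx->iothread) {
		/* ... queue work on the per-CPU list ... */
		wake_worker(ctx->iothread);
		pthread_mutex_unlock(&ctx->lock);
		return;
	}
	pthread_mutex_unlock(&ctx->lock);
	/* No worker: the completion would be handled inline instead. */
}

int main(void)
{
	struct io_worker w = { .wake = PTHREAD_COND_INITIALIZER };
	struct percpu_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.iothread = &w,
	};

	queue_racy(&ctx);	/* fine here only because nothing unplugs */
	queue_safe(&ctx);
	return 0;
}

The point of the model is ordering alone: in queue_safe() nothing touches ctx->iothread after the lock is dropped, which is what the new bnx2fc_pending_work() helper in the patch below guarantees.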
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 	return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+	unsigned int cpu = wqe % num_possible_cpus();
+	struct bnx2fc_percpu_s *fps;
+	struct bnx2fc_work *work;
+
+	fps = &per_cpu(bnx2fc_percpu, cpu);
+	spin_lock_bh(&fps->fp_work_lock);
+	if (fps->iothread) {
+		work = bnx2fc_alloc_work(tgt, wqe);
+		if (work) {
+			list_add_tail(&work->list, &fps->work_list);
+			wake_up_process(fps->iothread);
+			spin_unlock_bh(&fps->fp_work_lock);
+			return;
+		}
+	}
+	spin_unlock_bh(&fps->fp_work_lock);
+	bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
 	struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 			/* Unsolicited event notification */
 			bnx2fc_process_unsol_compl(tgt, wqe);
 		} else {
-			/* Pending work request completion */
-			struct bnx2fc_work *work = NULL;
-			struct bnx2fc_percpu_s *fps = NULL;
-			unsigned int cpu = wqe % num_possible_cpus();
-
-			fps = &per_cpu(bnx2fc_percpu, cpu);
-			spin_lock_bh(&fps->fp_work_lock);
-			if (unlikely(!fps->iothread))
-				goto unlock;
-
-			work = bnx2fc_alloc_work(tgt, wqe);
-			if (work)
-				list_add_tail(&work->list,
-					      &fps->work_list);
-unlock:
-			spin_unlock_bh(&fps->fp_work_lock);
-
-			/* Pending work request completion */
-			if (fps->iothread && work)
-				wake_up_process(fps->iothread);
-			else
-				bnx2fc_process_cq_compl(tgt, wqe);
+			bnx2fc_pending_work(tgt, wqe);
 			num_free_sqes++;
 		}
 		cqe++;
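With the helper in place, the iothread check, the work-list insertion and the wakeup all sit inside the fp_work_lock critical section. If the per-CPU thread is gone because the CPU was unplugged, or if bnx2fc_alloc_work() fails, the lock is dropped and the completion is handled synchronously via bnx2fc_process_cq_compl() in the caller's context, so the completion is never dropped.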