vhost-scsi: Fix crash during LUN unmapping

We normally clear the endpoint and then unmap LUNs, so the devices are fully
shut down by the time a LUN is unmapped, but it's legal to unmap before
clearing. If the user does that while TMFs are running, we can end up
crashing.

vhost_scsi_port_unlink assumes that the LUN's tmf struct will always be on
the tmf_queue list. However, while a TMF is executing, its struct has been
removed from that list. If a LUN unmap happens at that point,
vhost_scsi_port_unlink still assumes the entry is on the list, accesses it,
and frees it while it is in use.
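
A condensed sketch of the race, pieced together from the pre-fix code that
the diff below removes (unrelated fields trimmed). Note that
list_first_entry() on an empty list does not return NULL; it yields the list
head re-cast via container_of(), so the unlink path ends up freeing a bogus
pointer:

	/* vhost_scsi_handle_tmf(): the reserve entry leaves tmf_queue
	 * for as long as the TMF runs.
	 */
	mutex_lock(&tpg->tv_tpg_mutex);
	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del_init(&tmf->queue_entry);	/* tmf_queue is now empty */
	mutex_unlock(&tpg->tv_tpg_mutex);

	/* Concurrent vhost_scsi_port_unlink(): assumes the entry is still
	 * queued. On the now-empty list, list_first_entry() returns the
	 * address of the list head itself cast to a vhost_scsi_tmf, which
	 * is then "unlinked" and freed: memory corruption or a crash.
	 */
	mutex_lock(&tpg->tv_tpg_mutex);
	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del(&tmf->queue_entry);
	kfree(tmf);
	mutex_unlock(&tpg->tv_tpg_mutex);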

This fixes the bug by allocating the vhost_scsi_tmf struct only when it's
needed, as is already done for the se_tmr struct required when we submit the
TMF. Performance is not a concern in this path, and we can use GFP_KERNEL
since the allocation won't swing directly back on us, so we don't need to
preallocate the struct.
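
The resulting lifetime, condensed from the diff below: the struct is
allocated per TMF in vhost_scsi_handle_tmf() and freed in
vhost_scsi_release_tmf_res() once the response has been sent, so there is no
shared reserve entry left for the unlink path to race against:

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);	/* on demand, per TMF */
	if (!tmf)
		goto send_reject;
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
	...
	kfree(tmf);	/* vhost_scsi_release_tmf_res(), after the response */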

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Message-Id: <20230321020624.13323-3-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent e508efc3ae
commit 4c363c81f6
1 changed file with 4 additions and 32 deletions

--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c

@@ -125,7 +125,6 @@ struct vhost_scsi_tpg {
 	struct se_portal_group se_tpg;
 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 	struct vhost_scsi *vhost_scsi;
-	struct list_head tmf_queue;
 };
 
 struct vhost_scsi_tport {
@@ -206,10 +205,8 @@ struct vhost_scsi {
 
 struct vhost_scsi_tmf {
 	struct vhost_work vwork;
-	struct vhost_scsi_tpg *tpg;
 	struct vhost_scsi *vhost;
 	struct vhost_scsi_virtqueue *svq;
-	struct list_head queue_entry;
 
 	struct se_cmd se_cmd;
 	u8 scsi_resp;
@@ -352,12 +349,9 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 
 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
 {
-	struct vhost_scsi_tpg *tpg = tmf->tpg;
 	struct vhost_scsi_inflight *inflight = tmf->inflight;
 
-	mutex_lock(&tpg->tv_tpg_mutex);
-	list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
-	mutex_unlock(&tpg->tv_tpg_mutex);
+	kfree(tmf);
 	vhost_scsi_put_inflight(inflight);
 }
 
@@ -1194,19 +1188,11 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
 		goto send_reject;
 	}
 
-	mutex_lock(&tpg->tv_tpg_mutex);
-	if (list_empty(&tpg->tmf_queue)) {
-		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
-		mutex_unlock(&tpg->tv_tpg_mutex);
+	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+	if (!tmf)
 		goto send_reject;
-	}
-
-	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-			       queue_entry);
-	list_del_init(&tmf->queue_entry);
-	mutex_unlock(&tpg->tv_tpg_mutex);
 
-	tmf->tpg = tpg;
+	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
 	tmf->vhost = vs;
 	tmf->svq = svq;
 	tmf->resp_iov = vq->iov[vc->out];
@@ -2035,19 +2021,11 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
-	struct vhost_scsi_tmf *tmf;
-
-	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
-	if (!tmf)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&tmf->queue_entry);
-	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
 
 	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count++;
-	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
 	vhost_scsi_hotplug(tpg, lun);
@@ -2062,16 +2040,11 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
-	struct vhost_scsi_tmf *tmf;
 
 	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count--;
-	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-			       queue_entry);
-	list_del(&tmf->queue_entry);
-	kfree(tmf);
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
 	vhost_scsi_hotunplug(tpg, lun);
@@ -2332,7 +2305,6 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
 	}
 	mutex_init(&tpg->tv_tpg_mutex);
 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
-	INIT_LIST_HEAD(&tpg->tmf_queue);
 	tpg->tport = tport;
 	tpg->tport_tpgt = tpgt;
 