scsi: virtio_scsi: Remove per-target data because it is no longer used

Commit b5b6e8c8d3 ("scsi: virtio_scsi: fix IO hang caused by automatic irq
vector affinity") removed all virtio_scsi hostdata users. Since the SCSI
host data is no longer used, also remove the host data itself.
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in: parent 6c8d5f0512, commit c29d7d10cd
|
@@ -68,33 +68,6 @@ struct virtio_scsi_vq {
|
||||||
struct virtqueue *vq;
|
struct virtqueue *vq;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
|
||||||
* Per-target queue state.
|
|
||||||
*
|
|
||||||
* This struct holds the data needed by the queue steering policy. When a
|
|
||||||
* target is sent multiple requests, we need to drive them to the same queue so
|
|
||||||
* that FIFO processing order is kept. However, if a target was idle, we can
|
|
||||||
* choose a queue arbitrarily. In this case the queue is chosen according to
|
|
||||||
* the current VCPU, so the driver expects the number of request queues to be
|
|
||||||
* equal to the number of VCPUs. This makes it easy and fast to select the
|
|
||||||
* queue, and also lets the driver optimize the IRQ affinity for the virtqueues
|
|
||||||
* (each virtqueue's affinity is set to the CPU that "owns" the queue).
|
|
||||||
*
|
|
||||||
* tgt_seq is held to serialize reading and writing req_vq.
|
|
||||||
*
|
|
||||||
* Decrements of reqs are never concurrent with writes of req_vq: before the
|
|
||||||
* decrement reqs will be != 0; after the decrement the virtqueue completion
|
|
||||||
* routine will not use the req_vq so it can be changed by a new request.
|
|
||||||
* Thus they can happen outside the tgt_seq, provided of course we make reqs
|
|
||||||
* an atomic_t.
|
|
||||||
*/
|
|
||||||
struct virtio_scsi_target_state {
|
|
||||||
seqcount_t tgt_seq;
|
|
||||||
|
|
||||||
/* Currently active virtqueue for requests sent to this target. */
|
|
||||||
struct virtio_scsi_vq *req_vq;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Driver instance state */
|
/* Driver instance state */
|
||||||
struct virtio_scsi {
|
struct virtio_scsi {
|
||||||
struct virtio_device *vdev;
|
struct virtio_device *vdev;
|
||||||
|
@@ -693,29 +666,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
|
||||||
return virtscsi_tmf(vscsi, cmd);
|
return virtscsi_tmf(vscsi, cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int virtscsi_target_alloc(struct scsi_target *starget)
|
|
||||||
{
|
|
||||||
struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
|
|
||||||
struct virtio_scsi *vscsi = shost_priv(sh);
|
|
||||||
|
|
||||||
struct virtio_scsi_target_state *tgt =
|
|
||||||
kmalloc(sizeof(*tgt), GFP_KERNEL);
|
|
||||||
if (!tgt)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
seqcount_init(&tgt->tgt_seq);
|
|
||||||
tgt->req_vq = &vscsi->req_vqs[0];
|
|
||||||
|
|
||||||
starget->hostdata = tgt;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void virtscsi_target_destroy(struct scsi_target *starget)
|
|
||||||
{
|
|
||||||
struct virtio_scsi_target_state *tgt = starget->hostdata;
|
|
||||||
kfree(tgt);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int virtscsi_map_queues(struct Scsi_Host *shost)
|
static int virtscsi_map_queues(struct Scsi_Host *shost)
|
||||||
{
|
{
|
||||||
struct virtio_scsi *vscsi = shost_priv(shost);
|
struct virtio_scsi *vscsi = shost_priv(shost);
|
||||||
|
@@ -748,8 +698,6 @@ static struct scsi_host_template virtscsi_host_template = {
|
||||||
|
|
||||||
.dma_boundary = UINT_MAX,
|
.dma_boundary = UINT_MAX,
|
||||||
.use_clustering = ENABLE_CLUSTERING,
|
.use_clustering = ENABLE_CLUSTERING,
|
||||||
.target_alloc = virtscsi_target_alloc,
|
|
||||||
.target_destroy = virtscsi_target_destroy,
|
|
||||||
.map_queues = virtscsi_map_queues,
|
.map_queues = virtscsi_map_queues,
|
||||||
.track_queue_depth = 1,
|
.track_queue_depth = 1,
|
||||||
.force_blk_mq = 1,
|
.force_blk_mq = 1,
|
||||||
|
|
Loading…
Reference in New Issue