for-5.16/passthrough-flag-2021-10-29

-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmF8MnsQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpuBpEACzrzbUfkTQ33bwF60mZQaqbR0ha7TrP/hp
oAqthmf1S2U+7mzXHQ+6MN7p4+TVPa/ITxQZtLTw7U/68+w68tTUZfZHJ5H6tSXu
92OHFDDP4ZeqATRTcJBij/5Si9BiKBHexMqeyVYPw0DWdEukAko9f7Z81GonFbTu
EIdIWivBc76bLiK/X3w7lhLcaNyUv9cKalwjbI4xtwcHtcIYj5d2jIc9PF2I9Xtl
3oqNT4GOSv7s3mW7syB1UEPrzbhVIzCSNbMSviCoK7GA5g8EN5KMEGQQoUJ942Zv
bHMjMpGrXsWebPto9maXycGY/9WsVcpNB7opyQRpyG8yDDZq0AFNJxD/NBMkQo4S
Sfp0fxpVXDRWu7zX0EktwGyOp4YNwfS6pDeAhqhnSl2uPWTsxGZ0kXvlMpR9Rt/t
TjEKZe6lmcC7s42rPVRBRw5HEzEsVovf0z4lyvC4M223CV3c5cuYkAAtCcqLdVWq
JkceHSb7EKu7QY6jf3sBud14HaAj+sub7kffOWhhAxObg3Ytsql61AGzbhandnxT
AtN3n9PBHNGmrSv4MiiuP+Dq5jeT5NspFkf1FvnRcfmZMJtH1VXHKr84JbAy4VHr
5cZoDJzL9Zm1d865f+VWkZeYd3b2kKP8C0dm6tAn4VweT6eb8bu6tgB7wFQwLIFK
aRxz5vQ1AQ==
=dLYJ
-----END PGP SIGNATURE-----

Merge tag 'for-5.16/passthrough-flag-2021-10-29' of git://git.kernel.dk/linux-block

Pull QUEUE_FLAG_SCSI_PASSTHROUGH removal from Jens Axboe:
 "This contains a series leading to the removal of the
  QUEUE_FLAG_SCSI_PASSTHROUGH queue flag"

* tag 'for-5.16/passthrough-flag-2021-10-29' of git://git.kernel.dk/linux-block:
  block: remove blk_{get,put}_request
  block: remove QUEUE_FLAG_SCSI_PASSTHROUGH
  block: remove the initialize_rq_fn blk_mq_ops method
  scsi: add a scsi_alloc_request helper
  bsg-lib: initialize the bsg_job in bsg_transport_sg_io_fn
  nfsd/blocklayout: use ->get_unique_id instead of sending SCSI commands
  sd: implement ->get_unique_id
  block: add a ->get_unique_id method
commit 71ae42629e
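The series is mostly a mechanical rename plus two new helpers: blk_get_request() and blk_put_request() were thin wrappers around blk_mq_alloc_request() and blk_mq_free_request(), so callers now use the blk-mq names directly, and SCSI callers that relied on the old ->initialize_rq_fn hook move to the new scsi_alloc_request() helper instead. A minimal before/after sketch of the conversion pattern (the surrounding function is hypothetical; only the allocation and free calls are the ones touched by this series):

    #include <linux/blk-mq.h>

    /* Old style, removed by this series: */
    static int send_drv_cmd_old(struct request_queue *q)
    {
        struct request *rq;

        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);      /* removed API */
        if (IS_ERR(rq))
            return PTR_ERR(rq);
        /* ... set up and execute the passthrough request ... */
        blk_put_request(rq);                            /* removed API */
        return 0;
    }

    /* New style: call blk-mq directly (or scsi_alloc_request() on a
     * SCSI queue, which also initializes the scsi_cmnd in the pdu): */
    static int send_drv_cmd_new(struct request_queue *q)
    {
        struct request *rq;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
            return PTR_ERR(rq);
        /* ... set up and execute the passthrough request ... */
        blk_mq_free_request(rq);
        return 0;
    }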
@@ -597,34 +597,6 @@ bool blk_get_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_get_queue);
 
-/**
- * blk_get_request - allocate a request
- * @q: request queue to allocate a request for
- * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
- * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
- */
-struct request *blk_get_request(struct request_queue *q, unsigned int op,
-        blk_mq_req_flags_t flags)
-{
-    struct request *req;
-
-    WARN_ON_ONCE(op & REQ_NOWAIT);
-    WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
-
-    req = blk_mq_alloc_request(q, op, flags);
-    if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
-        q->mq_ops->initialize_rq_fn(req);
-
-    return req;
-}
-EXPORT_SYMBOL(blk_get_request);
-
-void blk_put_request(struct request *req)
-{
-    blk_mq_free_request(req);
-}
-EXPORT_SYMBOL(blk_put_request);
-
 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
 {
     char b[BDEVNAME_SIZE];
@@ -124,7 +124,6 @@ static const char *const blk_queue_flag_name[] = {
     QUEUE_FLAG_NAME(STATS),
     QUEUE_FLAG_NAME(POLL_STATS),
     QUEUE_FLAG_NAME(REGISTERED),
-    QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
     QUEUE_FLAG_NAME(QUIESCED),
     QUEUE_FLAG_NAME(PCI_P2PDMA),
     QUEUE_FLAG_NAME(ZONE_RESETALL),
@@ -31,6 +31,7 @@ static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
     struct bsg_job *job;
     struct request *rq;
     struct bio *bio;
+    void *reply;
     int ret;
 
     if (hdr->protocol != BSG_PROTOCOL_SCSI ||

@@ -39,22 +40,28 @@ static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
     if (!capable(CAP_SYS_RAWIO))
         return -EPERM;
 
-    rq = blk_get_request(q, hdr->dout_xfer_len ?
+    rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
                  REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq))
         return PTR_ERR(rq);
     rq->timeout = timeout;
 
     job = blk_mq_rq_to_pdu(rq);
+    reply = job->reply;
+    memset(job, 0, sizeof(*job));
+    job->reply = reply;
+    job->reply_len = SCSI_SENSE_BUFFERSIZE;
+    job->dd_data = job + 1;
+
     job->request_len = hdr->request_len;
     job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
     if (IS_ERR(job->request)) {
         ret = PTR_ERR(job->request);
-        goto out_put_request;
+        goto out_free_rq;
     }
 
     if (hdr->dout_xfer_len && hdr->din_xfer_len) {
-        job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
+        job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
         if (IS_ERR(job->bidi_rq)) {
             ret = PTR_ERR(job->bidi_rq);
             goto out_free_job_request;

@@ -134,11 +141,11 @@ out_unmap_bidi_rq:
         blk_rq_unmap_user(job->bidi_bio);
 out_free_bidi_rq:
     if (job->bidi_rq)
-        blk_put_request(job->bidi_rq);
+        blk_mq_free_request(job->bidi_rq);
 out_free_job_request:
     kfree(job->request);
-out_put_request:
-    blk_put_request(rq);
+out_free_rq:
+    blk_mq_free_request(rq);
     return ret;
 }
 

@@ -302,18 +309,6 @@ static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
     return 0;
 }
 
-/* called right before the request is given to the request_queue user */
-static void bsg_initialize_rq(struct request *req)
-{
-    struct bsg_job *job = blk_mq_rq_to_pdu(req);
-    void *reply = job->reply;
-
-    memset(job, 0, sizeof(*job));
-    job->reply = reply;
-    job->reply_len = SCSI_SENSE_BUFFERSIZE;
-    job->dd_data = job + 1;
-}
-
 static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
         unsigned int hctx_idx)
 {

@@ -350,7 +345,6 @@ static const struct blk_mq_ops bsg_mq_ops = {
     .queue_rq = bsg_queue_rq,
     .init_request = bsg_init_rq,
     .exit_request = bsg_exit_rq,
-    .initialize_rq_fn = bsg_initialize_rq,
     .complete = bsg_complete,
     .timeout = bsg_timeout,
 };
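With ->initialize_rq_fn gone, bsg-lib can no longer rely on the block layer to reset its per-request bsg_job on every allocation, so bsg_transport_sg_io_fn now does the reset itself right after blk_mq_alloc_request(), preserving the reply buffer that .init_request preallocated. A sketch of that pattern in isolation (struct my_job and my_alloc_job are illustrative names, not from the patch):

    #include <linux/blk-mq.h>
    #include <linux/string.h>

    struct my_job {
        void *reply;            /* preallocated in .init_request, must survive */
        unsigned int reply_len;
        /* ... per-submission state that must start out zeroed ... */
    };

    static struct request *my_alloc_job(struct request_queue *q)
    {
        struct request *rq;
        struct my_job *job;
        void *reply;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
            return rq;

        /* Reset the pdu by hand, keeping the preallocated reply buffer;
         * this replaces the old ->initialize_rq_fn callback. */
        job = blk_mq_rq_to_pdu(rq);
        reply = job->reply;
        memset(job, 0, sizeof(*job));
        job->reply = reply;
        return rq;
    }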
@@ -281,8 +281,8 @@ config BLK_DEV_RAM_SIZE
 config CDROM_PKTCDVD
     tristate "Packet writing on CD/DVD media (DEPRECATED)"
     depends on !UML
+    depends on SCSI
     select CDROM
-    select SCSI_COMMON
     help
       Note: This driver is deprecated and will be removed from the
       kernel in the near future!
@@ -775,14 +775,14 @@ static int pd_special_command(struct pd_unit *disk,
     struct request *rq;
     struct pd_req *req;
 
-    rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
+    rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq))
         return PTR_ERR(rq);
     req = blk_mq_rq_to_pdu(rq);
 
     req->func = func;
     blk_execute_rq(disk->gd, rq, 0);
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
     return 0;
 }
 
@@ -703,7 +703,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
     struct request *rq;
     int ret = 0;
 
-    rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+    rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
                  REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq))
         return PTR_ERR(rq);

@@ -726,7 +726,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
     if (scsi_req(rq)->result)
         ret = -EIO;
 out:
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
     return ret;
 }
 

@@ -2536,6 +2536,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
     int i;
     char b[BDEVNAME_SIZE];
     struct block_device *bdev;
+    struct scsi_device *sdev;
 
     if (pd->pkt_dev == dev) {
         pkt_err(pd, "recursive setup not allowed\n");

@@ -2559,10 +2560,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
     bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
     if (IS_ERR(bdev))
         return PTR_ERR(bdev);
-    if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+    sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+    if (!sdev) {
         blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
         return -EINVAL;
     }
+    put_device(&sdev->sdev_gendev);
 
     /* This is safe, since we have a reference from open(). */
     __module_get(THIS_MODULE);
@@ -312,7 +312,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
     struct request *req;
     int err;
 
-    req = blk_get_request(q, REQ_OP_DRV_IN, 0);
+    req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
     if (IS_ERR(req))
         return PTR_ERR(req);
 

@@ -323,7 +323,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
     blk_execute_rq(vblk->disk, req, false);
     err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
 out:
-    blk_put_request(req);
+    blk_mq_free_request(req);
     return err;
 }
 
@@ -530,7 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 
     bdev = pgpath->path.dev->bdev;
     q = bdev_get_queue(bdev);
-    clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
+    clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
             BLK_MQ_REQ_NOWAIT);
     if (IS_ERR(clone)) {
         /* EBUSY, ENODEV or EWOULDBLOCK: requeue */

@@ -579,7 +579,7 @@ static void multipath_release_clone(struct request *clone,
                     clone->io_start_time_ns);
     }
 
-    blk_put_request(clone);
+    blk_mq_free_request(clone);
 }
 
 /*
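dm-mpath allocates the clone from the underlying path's queue with BLK_MQ_REQ_NOWAIT because this runs in the map path, where sleeping for a free tag is not an option; the rename keeps that behavior. A condensed sketch of the non-blocking allocation pattern (try_alloc_clone is an illustrative name; the dm-specific requeue mechanics are omitted):

    #include <linux/blk-mq.h>

    static int try_alloc_clone(struct request_queue *q, struct request *rq,
                               struct request **clone)
    {
        /* BLK_MQ_REQ_NOWAIT: return an ERR_PTR instead of sleeping
         * when no tag is available. */
        *clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
                                      BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(*clone))
            return PTR_ERR(*clone); /* e.g. -EWOULDBLOCK: caller requeues */
        return 0;
    }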
@@ -258,7 +258,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
     mq = &md->queue;
 
     /* Dispatch locking to the block layer */
-    req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
+    req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
     if (IS_ERR(req)) {
         count = PTR_ERR(req);
         goto out_put;

@@ -266,7 +266,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
     req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
     blk_execute_rq(NULL, req, 0);
     ret = req_to_mmc_queue_req(req)->drv_op_result;
-    blk_put_request(req);
+    blk_mq_free_request(req);
 
     if (!ret) {
         pr_info("%s: Locking boot partition ro until next power on\n",

@@ -646,7 +646,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
      * Dispatch the ioctl() into the block request queue.
      */
     mq = &md->queue;
-    req = blk_get_request(mq->queue,
+    req = blk_mq_alloc_request(mq->queue,
         idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);

@@ -660,7 +660,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
     blk_execute_rq(NULL, req, 0);
     ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
     err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
-    blk_put_request(req);
+    blk_mq_free_request(req);
 
 cmd_done:
     kfree(idata->buf);

@@ -716,7 +716,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
      * Dispatch the ioctl()s into the block request queue.
      */
     mq = &md->queue;
-    req = blk_get_request(mq->queue,
+    req = blk_mq_alloc_request(mq->queue,
         idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);

@@ -733,7 +733,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
     for (i = 0; i < num_of_cmds && !err; i++)
         err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
 
-    blk_put_request(req);
+    blk_mq_free_request(req);
 
 cmd_err:
     for (i = 0; i < num_of_cmds; i++) {

@@ -2730,7 +2730,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
     int ret;
 
     /* Ask the block layer about the card status */
-    req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+    req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
     if (IS_ERR(req))
         return PTR_ERR(req);
     req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;

@@ -2740,7 +2740,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
         *val = ret;
         ret = 0;
     }
-    blk_put_request(req);
+    blk_mq_free_request(req);
 
     return ret;
 }

@@ -2766,7 +2766,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
         return -ENOMEM;
 
     /* Ask the block layer for the EXT CSD */
-    req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+    req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);
         goto out_free;

@@ -2775,7 +2775,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
     req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
     blk_execute_rq(NULL, req, 0);
     err = req_to_mmc_queue_req(req)->drv_op_result;
-    blk_put_request(req);
+    blk_mq_free_request(req);
     if (err) {
         pr_err("FAILED %d\n", err);
         goto out_free;
@@ -25,8 +25,8 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
         return -EOPNOTSUPP;
     }
 
-    rq = blk_get_request(q, hdr->dout_xfer_len ?
-                 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+    rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
+                 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq))
         return PTR_ERR(rq);
     rq->timeout = timeout;

@@ -95,7 +95,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
 out_free_cmd:
     scsi_req_free_cmd(scsi_req(rq));
 out_put_request:
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
     return ret;
 }
 
@@ -1979,7 +1979,7 @@ maybe_retry:
 
 static void eh_lock_door_done(struct request *req, blk_status_t status)
 {
-    blk_put_request(req);
+    blk_mq_free_request(req);
 }
 
 /**

@@ -1998,7 +1998,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
     struct request *req;
     struct scsi_request *rq;
 
-    req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
+    req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
     if (IS_ERR(req))
         return;
     rq = scsi_req(req);
@@ -438,7 +438,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
         at_head = 1;
 
     ret = -ENOMEM;
-    rq = blk_get_request(sdev->request_queue, writing ?
+    rq = scsi_alloc_request(sdev->request_queue, writing ?
                  REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq))
         return PTR_ERR(rq);

@@ -490,7 +490,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
 out_free_cdb:
     scsi_req_free_cmd(req);
 out_put_request:
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
     return ret;
 }
 

@@ -561,7 +561,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
 
     }
 
-    rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+    rq = scsi_alloc_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq)) {
         err = PTR_ERR(rq);
         goto error_free_buffer;

@@ -634,7 +634,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
     }
 
 error:
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
 
 error_free_buffer:
     kfree(buffer);
@@ -216,7 +216,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
     struct scsi_request *rq;
     int ret;
 
-    req = blk_get_request(sdev->request_queue,
+    req = scsi_alloc_request(sdev->request_queue,
             data_direction == DMA_TO_DEVICE ?
             REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
             rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);

@@ -260,7 +260,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
         scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
     ret = rq->result;
 out:
-    blk_put_request(req);
+    blk_mq_free_request(req);
 
     return ret;
 }

@@ -1079,9 +1079,6 @@ EXPORT_SYMBOL(scsi_alloc_sgtables);
  * This function initializes the members of struct scsi_cmnd that must be
  * initialized before request processing starts and that won't be
  * reinitialized if a SCSI command is requeued.
- *
- * Called from inside blk_get_request() for pass-through requests and from
- * inside scsi_init_command() for filesystem requests.
  */
 static void scsi_initialize_rq(struct request *rq)
 {

@@ -1098,6 +1095,18 @@ static void scsi_initialize_rq(struct request *rq)
     cmd->retries = 0;
 }
 
+struct request *scsi_alloc_request(struct request_queue *q,
+        unsigned int op, blk_mq_req_flags_t flags)
+{
+    struct request *rq;
+
+    rq = blk_mq_alloc_request(q, op, flags);
+    if (!IS_ERR(rq))
+        scsi_initialize_rq(rq);
+    return rq;
+}
+EXPORT_SYMBOL_GPL(scsi_alloc_request);
+
 /*
  * Only called when the request isn't completed by SCSI, and not freed by
  * SCSI

@@ -1864,7 +1873,6 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
 #endif
     .init_request = scsi_mq_init_request,
     .exit_request = scsi_mq_exit_request,
-    .initialize_rq_fn = scsi_initialize_rq,
     .cleanup_rq = scsi_cleanup_rq,
     .busy = scsi_mq_lld_busy,
     .map_queues = scsi_map_queues,

@@ -1894,7 +1902,6 @@ static const struct blk_mq_ops scsi_mq_ops = {
 #endif
     .init_request = scsi_mq_init_request,
     .exit_request = scsi_mq_exit_request,
-    .initialize_rq_fn = scsi_initialize_rq,
     .cleanup_rq = scsi_cleanup_rq,
     .busy = scsi_mq_lld_busy,
     .map_queues = scsi_map_queues,

@@ -1960,6 +1967,14 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
 
     return sdev;
 }
+/*
+ * pktcdvd should have been integrated into the SCSI layers, but for historical
+ * reasons like the old IDE driver it isn't. This export allows it to safely
+ * probe if a given device is a SCSI one and only attach to that.
+ */
+#ifdef CONFIG_CDROM_PKTCDVD_MODULE
+EXPORT_SYMBOL_GPL(scsi_device_from_queue);
+#endif
 
 /**
  * scsi_block_requests - Utility function used by low-level drivers to prevent
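scsi_alloc_request() above is deliberately trivial: allocate via blk_mq_alloc_request(), then run scsi_initialize_rq() on success, which is exactly what the removed ->initialize_rq_fn indirection used to do. A sketch of how a caller might issue a simple passthrough command with it, loosely modeled on __scsi_execute() (issue_test_unit_ready is an illustrative name; the blk_execute_rq() signature is the 5.15-era one used elsewhere in this diff):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_proto.h>
    #include <scsi/scsi_common.h>
    #include <scsi/scsi_request.h>

    static int issue_test_unit_ready(struct scsi_device *sdev)
    {
        struct request *req;
        struct scsi_request *rq;
        int ret;

        req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
            return PTR_ERR(req);

        rq = scsi_req(req);
        rq->cmd[0] = TEST_UNIT_READY;   /* 6-byte CDB, rest stays zeroed */
        rq->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
        req->timeout = 30 * HZ;

        blk_execute_rq(NULL, req, 0);   /* no disk, at_head = 0 */
        ret = rq->result;

        blk_mq_free_request(req);
        return ret;
    }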
@@ -280,7 +280,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
     sdev->request_queue = q;
     q->queuedata = sdev;
     __scsi_init_queue(sdev->host, q);
-    blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
     WARN_ON_ONCE(!blk_get_queue(q));
 
     depth = sdev->host->cmd_per_lun ?: 1;
@@ -1757,6 +1757,44 @@ static void sd_rescan(struct device *dev)
     sd_revalidate_disk(sdkp->disk);
 }
 
+static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
+        enum blk_unique_id type)
+{
+    struct scsi_device *sdev = scsi_disk(disk)->device;
+    const struct scsi_vpd *vpd;
+    const unsigned char *d;
+    int ret = -ENXIO, len;
+
+    rcu_read_lock();
+    vpd = rcu_dereference(sdev->vpd_pg83);
+    if (!vpd)
+        goto out_unlock;
+
+    ret = -EINVAL;
+    for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
+        /* we only care about designators with LU association */
+        if (((d[1] >> 4) & 0x3) != 0x00)
+            continue;
+        if ((d[1] & 0xf) != type)
+            continue;
+
+        /*
+         * Only exit early if a 16-byte descriptor was found. Otherwise
+         * keep looking as one with more entropy might still show up.
+         */
+        len = d[3];
+        if (len != 8 && len != 12 && len != 16)
+            continue;
+        ret = len;
+        memcpy(id, d + 4, len);
+        if (len == 16)
+            break;
+    }
+out_unlock:
+    rcu_read_unlock();
+    return ret;
+}
+
 static char sd_pr_type(enum pr_type type)
 {
     switch (type) {

@@ -1861,6 +1899,7 @@ static const struct block_device_operations sd_fops = {
     .check_events = sd_check_events,
     .unlock_native_capacity = sd_unlock_native_capacity,
     .report_zones = sd_zbc_report_zones,
+    .get_unique_id = sd_get_unique_id,
     .pr_ops = &sd_pr_ops,
 };
 
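sd_get_unique_id() walks the cached VPD page 0x83 (device identification) data under RCU and copies out the best LU-associated designator of the requested type, preferring a 16-byte descriptor. On the caller side, the method is reached through the gendisk's block_device_operations; roughly like this (get_disk_naa_id is an illustrative helper, not part of the patch):

    #include <linux/blkdev.h>

    /* Returns the designator length (8, 12 or 16) on success, or a
     * negative errno. Not every disk implements the method, so check
     * for it before calling. */
    static int get_disk_naa_id(struct gendisk *disk, u8 id[16])
    {
        if (!disk->fops->get_unique_id)
            return -EOPNOTSUPP;
        return disk->fops->get_unique_id(disk, id, BLK_UID_NAA);
    }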
@@ -815,7 +815,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
     if (atomic_read(&sdp->detaching)) {
         if (srp->bio) {
             scsi_req_free_cmd(scsi_req(srp->rq));
-            blk_put_request(srp->rq);
+            blk_mq_free_request(srp->rq);
             srp->rq = NULL;
         }
 

@@ -1390,7 +1390,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
      */
     srp->rq = NULL;
     scsi_req_free_cmd(scsi_req(rq));
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
 
     write_lock_irqsave(&sfp->rq_list_lock, iflags);
     if (unlikely(srp->orphan)) {

@@ -1718,13 +1718,13 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
      *
      * With scsi-mq enabled, there are a fixed number of preallocated
      * requests equal in number to shost->can_queue.  If all of the
-     * preallocated requests are already in use, then blk_get_request()
+     * preallocated requests are already in use, then scsi_alloc_request()
      * will sleep until an active command completes, freeing up a request.
      * Although waiting in an asynchronous interface is less than ideal, we
      * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
      * not expect an EWOULDBLOCK from this condition.
      */
-    rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+    rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
             REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq)) {
         kfree(long_cmdp);

@@ -1830,7 +1830,7 @@ sg_finish_rem_req(Sg_request *srp)
 
     if (srp->rq) {
         scsi_req_free_cmd(scsi_req(srp->rq));
-        blk_put_request(srp->rq);
+        blk_mq_free_request(srp->rq);
     }
 
     if (srp->res_used)
@@ -967,7 +967,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
     struct bio *bio;
     int ret;
 
-    rq = blk_get_request(disk->queue, REQ_OP_DRV_IN, 0);
+    rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq))
         return PTR_ERR(rq);
     req = scsi_req(rq);

@@ -1003,7 +1003,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
     if (blk_rq_unmap_user(bio))
         ret = -EFAULT;
 out_put_request:
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
     return ret;
 }
 
@@ -530,7 +530,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
         complete(SRpnt->waiting);
 
     blk_rq_unmap_user(tmp);
-    blk_put_request(req);
+    blk_mq_free_request(req);
 }
 
 static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,

@@ -543,7 +543,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
     int err = 0;
     struct scsi_tape *STp = SRpnt->stp;
 
-    req = blk_get_request(SRpnt->stp->device->request_queue,
+    req = scsi_alloc_request(SRpnt->stp->device->request_queue,
             data_direction == DMA_TO_DEVICE ?
             REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(req))

@@ -557,7 +557,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
         err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
                       GFP_KERNEL);
         if (err) {
-            blk_put_request(req);
+            blk_mq_free_request(req);
             return err;
         }
     }
@@ -2920,7 +2920,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
      * Even though we use wait_event() which sleeps indefinitely,
      * the maximum wait time is bounded by SCSI request timeout.
      */
-    req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+    req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);
         goto out_unlock;

@@ -2947,7 +2947,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
             (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
 
 out:
-    blk_put_request(req);
+    blk_mq_free_request(req);
 out_unlock:
     up_read(&hba->clk_scaling_lock);
     return err;

@@ -6512,9 +6512,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
     int task_tag, err;
 
     /*
-     * blk_get_request() is used here only to get a free tag.
+     * blk_mq_alloc_request() is used here only to get a free tag.
      */
-    req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+    req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
     if (IS_ERR(req))
         return PTR_ERR(req);
 

@@ -6570,7 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
     spin_unlock_irqrestore(hba->host->host_lock, flags);
 
     ufshcd_release(hba);
-    blk_put_request(req);
+    blk_mq_free_request(req);
 
     return err;
 }

@@ -6655,7 +6655,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 
     down_read(&hba->clk_scaling_lock);
 
-    req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+    req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
     if (IS_ERR(req)) {
         err = PTR_ERR(req);
         goto out_unlock;

@@ -6736,7 +6736,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
             (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
 
 out:
-    blk_put_request(req);
+    blk_mq_free_request(req);
 out_unlock:
     up_read(&hba->clk_scaling_lock);
     return err;

@@ -7907,7 +7907,7 @@ static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
     if (error != BLK_STS_OK)
         pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
     kfree(rq->end_io_data);
-    blk_put_request(rq);
+    blk_mq_free_request(rq);
 }
 
 static int

@@ -7927,7 +7927,7 @@ ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
     if (!buffer)
         return -ENOMEM;
 
-    req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN,
+    req = blk_mq_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
                   /*flags=*/BLK_MQ_REQ_PM);
     if (IS_ERR(req)) {
         ret = PTR_ERR(req);

@@ -7952,7 +7952,7 @@ ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
     return 0;
 
 out_put:
-    blk_put_request(req);
+    blk_mq_free_request(req);
 out_free:
     kfree(buffer);
     return ret;
@@ -449,7 +449,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
         return NULL;
 
 retry:
-    req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
+    req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
                   BLK_MQ_REQ_NOWAIT);
 
     if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {

@@ -473,7 +473,7 @@ free_rq:
 
 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
 {
-    blk_put_request(rq->req);
+    blk_mq_free_request(rq->req);
     kmem_cache_free(hpb->map_req_cache, rq);
 }
 
@@ -980,11 +980,10 @@ pscsi_execute_cmd(struct se_cmd *cmd)
     memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
         scsi_command_size(cmd->t_task_cdb));
 
-    req = blk_get_request(pdv->pdv_sd->request_queue,
+    req = scsi_alloc_request(pdv->pdv_sd->request_queue,
             cmd->data_direction == DMA_TO_DEVICE ?
             REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(req)) {
-        pr_err("PSCSI: blk_get_request() failed\n");
         ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
         goto fail;
     }

@@ -1012,7 +1011,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
     return 0;
 
 fail_put_request:
-    blk_put_request(req);
+    blk_mq_free_request(req);
 fail:
     kfree(pt);
     return ret;

@@ -1067,7 +1066,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
         break;
     }
 
-    blk_put_request(req);
+    blk_mq_free_request(req);
     kfree(pt);
 }
 
@@ -109,7 +109,6 @@ config NFSD_SCSILAYOUT
     depends on NFSD_V4 && BLOCK
     select NFSD_PNFS
     select EXPORTFS_BLOCK_OPS
-    select SCSI_COMMON
     help
       This option enables support for the exporting pNFS SCSI layouts
       in the kernel's NFS server.  The pNFS SCSI layout enables NFS
@@ -9,9 +9,6 @@
 #include <linux/pr.h>
 
 #include <linux/nfsd/debug.h>
-#include <scsi/scsi_proto.h>
-#include <scsi/scsi_common.h>
-#include <scsi/scsi_request.h>
 
 #include "blocklayoutxdr.h"
 #include "pnfs.h"

@@ -211,109 +208,6 @@ const struct nfsd4_layout_ops bl_layout_ops = {
 #endif /* CONFIG_NFSD_BLOCKLAYOUT */
 
 #ifdef CONFIG_NFSD_SCSILAYOUT
-static int nfsd4_scsi_identify_device(struct block_device *bdev,
-        struct pnfs_block_volume *b)
-{
-    struct request_queue *q = bdev->bd_disk->queue;
-    struct request *rq;
-    struct scsi_request *req;
-    /*
-     * The allocation length (passed in bytes 3 and 4 of the INQUIRY
-     * command descriptor block) specifies the number of bytes that have
-     * been allocated for the data-in buffer.
-     * 252 is the highest one-byte value that is a multiple of 4.
-     * 65532 is the highest two-byte value that is a multiple of 4.
-     */
-    size_t bufflen = 252, maxlen = 65532, len, id_len;
-    u8 *buf, *d, type, assoc;
-    int retries = 1, error;
-
-    if (WARN_ON_ONCE(!blk_queue_scsi_passthrough(q)))
-        return -EINVAL;
-
-again:
-    buf = kzalloc(bufflen, GFP_KERNEL);
-    if (!buf)
-        return -ENOMEM;
-
-    rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
-    if (IS_ERR(rq)) {
-        error = -ENOMEM;
-        goto out_free_buf;
-    }
-    req = scsi_req(rq);
-
-    error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
-    if (error)
-        goto out_put_request;
-
-    req->cmd[0] = INQUIRY;
-    req->cmd[1] = 1;
-    req->cmd[2] = 0x83;
-    req->cmd[3] = bufflen >> 8;
-    req->cmd[4] = bufflen & 0xff;
-    req->cmd_len = COMMAND_SIZE(INQUIRY);
-
-    blk_execute_rq(NULL, rq, 1);
-    if (req->result) {
-        pr_err("pNFS: INQUIRY 0x83 failed with: %x\n",
-            req->result);
-        error = -EIO;
-        goto out_put_request;
-    }
-
-    len = (buf[2] << 8) + buf[3] + 4;
-    if (len > bufflen) {
-        if (len <= maxlen && retries--) {
-            blk_put_request(rq);
-            kfree(buf);
-            bufflen = len;
-            goto again;
-        }
-        pr_err("pNFS: INQUIRY 0x83 response invalid (len = %zd)\n",
-            len);
-        goto out_put_request;
-    }
-
-    d = buf + 4;
-    for (d = buf + 4; d < buf + len; d += id_len + 4) {
-        id_len = d[3];
-        type = d[1] & 0xf;
-        assoc = (d[1] >> 4) & 0x3;
-
-        /*
-         * We only care about a EUI-64 and NAA designator types
-         * with LU association.
-         */
-        if (assoc != 0x00)
-            continue;
-        if (type != 0x02 && type != 0x03)
-            continue;
-        if (id_len != 8 && id_len != 12 && id_len != 16)
-            continue;
-
-        b->scsi.code_set = PS_CODE_SET_BINARY;
-        b->scsi.designator_type = type == 0x02 ?
-            PS_DESIGNATOR_EUI64 : PS_DESIGNATOR_NAA;
-        b->scsi.designator_len = id_len;
-        memcpy(b->scsi.designator, d + 4, id_len);
-
-        /*
-         * If we found a 8 or 12 byte descriptor continue on to
-         * see if a 16 byte one is available. If we find a
-         * 16 byte descriptor we're done.
-         */
-        if (id_len == 16)
-            break;
-    }
-
-out_put_request:
-    blk_put_request(rq);
-out_free_buf:
-    kfree(buf);
-    return error;
-}
-
 #define NFSD_MDS_PR_KEY 0x0100000000000000ULL
 
 /*

@@ -325,6 +219,31 @@ static u64 nfsd4_scsi_pr_key(struct nfs4_client *clp)
     return ((u64)clp->cl_clientid.cl_boot << 32) | clp->cl_clientid.cl_id;
 }
 
+static const u8 designator_types[] = {
+    PS_DESIGNATOR_EUI64,
+    PS_DESIGNATOR_NAA,
+};
+
+static int
+nfsd4_block_get_unique_id(struct gendisk *disk, struct pnfs_block_volume *b)
+{
+    int ret, i;
+
+    for (i = 0; i < ARRAY_SIZE(designator_types); i++) {
+        u8 type = designator_types[i];
+
+        ret = disk->fops->get_unique_id(disk, b->scsi.designator, type);
+        if (ret > 0) {
+            b->scsi.code_set = PS_CODE_SET_BINARY;
+            b->scsi.designator_type = type;
+            b->scsi.designator_len = ret;
+            return 0;
+        }
+    }
+
+    return -EINVAL;
+}
+
 static int
 nfsd4_block_get_device_info_scsi(struct super_block *sb,
                  struct nfs4_client *clp,

@@ -333,7 +252,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
     struct pnfs_block_deviceaddr *dev;
     struct pnfs_block_volume *b;
     const struct pr_ops *ops;
-    int error;
+    int ret;
 
     dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
         sizeof(struct pnfs_block_volume), GFP_KERNEL);

@@ -347,33 +266,38 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
     b->type = PNFS_BLOCK_VOLUME_SCSI;
     b->scsi.pr_key = nfsd4_scsi_pr_key(clp);
 
-    error = nfsd4_scsi_identify_device(sb->s_bdev, b);
-    if (error)
-        return error;
+    ret = nfsd4_block_get_unique_id(sb->s_bdev->bd_disk, b);
+    if (ret < 0)
+        goto out_free_dev;
 
+    ret = -EINVAL;
     ops = sb->s_bdev->bd_disk->fops->pr_ops;
     if (!ops) {
         pr_err("pNFS: device %s does not support PRs.\n",
             sb->s_id);
-        return -EINVAL;
+        goto out_free_dev;
     }
 
-    error = ops->pr_register(sb->s_bdev, 0, NFSD_MDS_PR_KEY, true);
-    if (error) {
+    ret = ops->pr_register(sb->s_bdev, 0, NFSD_MDS_PR_KEY, true);
+    if (ret) {
         pr_err("pNFS: failed to register key for device %s.\n",
             sb->s_id);
-        return -EINVAL;
+        goto out_free_dev;
     }
 
-    error = ops->pr_reserve(sb->s_bdev, NFSD_MDS_PR_KEY,
+    ret = ops->pr_reserve(sb->s_bdev, NFSD_MDS_PR_KEY,
             PR_EXCLUSIVE_ACCESS_REG_ONLY, 0);
-    if (error) {
+    if (ret) {
         pr_err("pNFS: failed to reserve device %s.\n",
             sb->s_id);
-        return -EINVAL;
+        goto out_free_dev;
     }
 
     return 0;
+
+out_free_dev:
+    kfree(dev);
+    return ret;
 }
 
 static __be32
@@ -145,8 +145,9 @@ void nfsd4_setup_layout_type(struct svc_export *exp)
 #ifdef CONFIG_NFSD_SCSILAYOUT
     if (sb->s_export_op->map_blocks &&
         sb->s_export_op->commit_blocks &&
-        sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops &&
-        blk_queue_scsi_passthrough(sb->s_bdev->bd_disk->queue))
+        sb->s_bdev &&
+        sb->s_bdev->bd_disk->fops->pr_ops &&
+        sb->s_bdev->bd_disk->fops->get_unique_id)
         exp->ex_layout_types |= 1 << LAYOUT_SCSI;
 #endif
 }
@@ -566,11 +566,6 @@ struct blk_mq_ops {
     void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
                  unsigned int);
 
-    /**
-     * @initialize_rq_fn: Called from inside blk_get_request().
-     */
-    void (*initialize_rq_fn)(struct request *rq);
-
     /**
      * @cleanup_rq: Called before freeing one request which isn't completed
      * yet, and usually for freeing the driver private data.

@@ -897,9 +892,6 @@ static inline bool rq_is_sync(struct request *rq)
 }
 
 void blk_rq_init(struct request_queue *q, struct request *rq);
-void blk_put_request(struct request *rq);
-struct request *blk_get_request(struct request_queue *q, unsigned int op,
-        blk_mq_req_flags_t flags);
 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
         struct bio_set *bs, gfp_t gfp_mask,
         int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
@@ -399,7 +399,6 @@ struct request_queue {
 #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
 #define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
 #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
 #define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
 #define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */

@@ -433,8 +432,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_secure_erase(q) \
     (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_scsi_passthrough(q) \
-    test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
 #define blk_queue_pci_p2pdma(q) \
     test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME

@@ -1188,6 +1185,14 @@ static inline void blk_crypto_unregister(struct request_queue *q) { }
 
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
+enum blk_unique_id {
+    /* these match the Designator Types specified in SPC */
+    BLK_UID_T10 = 1,
+    BLK_UID_EUI64 = 2,
+    BLK_UID_NAA = 3,
+};
+
+#define NFL4_UFLG_MASK 0x0000003F
 
 struct block_device_operations {
     void (*submit_bio)(struct bio *bio);

@@ -1206,6 +1211,9 @@ struct block_device_operations {
     int (*report_zones)(struct gendisk *, sector_t sector,
             unsigned int nr_zones, report_zones_cb cb, void *data);
     char *(*devnode)(struct gendisk *disk, umode_t *mode);
+    /* returns the length of the identifier or a negative errno: */
+    int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+            enum blk_unique_id id_type);
     struct module *owner;
     const struct pr_ops *pr_ops;
 
@@ -396,4 +396,7 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
 extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
                  u8 key, u8 asc, u8 ascq);
 
+struct request *scsi_alloc_request(struct request_queue *q,
+        unsigned int op, blk_mq_req_flags_t flags);
+
 #endif /* _SCSI_SCSI_CMND_H */