target: Untangle front-end and back-end meanings of max_sectors attribute
se_dev_attrib.max_sectors currently has two independent meanings:

- It is reported in the block limits VPD page as the maximum transfer
  length, i.e. the largest IO that the front-end (fabric) can handle.
  Also, the target core doesn't enforce this maximum transfer length.

- It is used to hold the size of the largest IO that the back-end can
  handle, so we know when to split SCSI commands into multiple tasks.

Fix this by adding a new se_dev_attrib.fabric_max_sectors to hold the
maximum transfer length, and checking incoming IOs against that limit.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent effc6cc882
commit 015487b89f
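For illustration only (this sketch is not part of the commit): after the change, fabric_max_sectors caps what a single incoming SCSI command may request and is the value reported in the Block Limits VPD page, while max_sectors stays a back-end limit that governs how the target core splits an accepted command into tasks. A minimal standalone C sketch of that division of labor, with an invented helper name and a plain ceil-division standing in for the real task-splitting code:

    #include <stdint.h>

    /*
     * Sketch only -- not the target core's actual code.
     * Returns -1 if the command exceeds the fabric (front-end) limit,
     * otherwise the number of back-end tasks it would be split into.
     */
    static int check_and_count_tasks(uint32_t sectors,
                                     uint32_t fabric_max_sectors,
                                     uint32_t max_sectors)
    {
        if (sectors > fabric_max_sectors)
            return -1;  /* reject: larger than the reported max transfer length */

        return (int)((sectors + max_sectors - 1) / max_sectors);
    }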
@@ -456,7 +456,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);

 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
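For context on the buffer offset above: in the Block Limits VPD page (B0h), bytes 8-11 hold MAXIMUM TRANSFER LENGTH and bytes 12-15 hold OPTIMAL TRANSFER LENGTH, both as big-endian counts of logical blocks. A hedged sketch of the initiator side that would consume these fields (the helper and variable names are invented for the example):

    #include <stdint.h>

    /* Decode a big-endian 32-bit field from a VPD page buffer. */
    static uint32_t get_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    /* Pull the two transfer-length fields out of a returned B0h page. */
    static void parse_block_limits(const uint8_t *vpd_b0,
                                   uint32_t *max_xfer_len,
                                   uint32_t *opt_xfer_len)
    {
        *max_xfer_len = get_be32(&vpd_b0[8]);   /* fed by fabric_max_sectors */
        *opt_xfer_len = get_be32(&vpd_b0[12]);  /* fed by optimal_sectors */
    }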
@@ -702,6 +702,9 @@ SE_DEV_ATTR_RO(hw_max_sectors);
 DEF_DEV_ATTRIB(max_sectors);
 SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);

+DEF_DEV_ATTRIB(fabric_max_sectors);
+SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
+
 DEF_DEV_ATTRIB(optimal_sectors);
 SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
@@ -741,6 +744,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
 	&target_core_dev_attrib_block_size.attr,
 	&target_core_dev_attrib_hw_max_sectors.attr,
 	&target_core_dev_attrib_max_sectors.attr,
+	&target_core_dev_attrib_fabric_max_sectors.attr,
 	&target_core_dev_attrib_optimal_sectors.attr,
 	&target_core_dev_attrib_hw_queue_depth.attr,
 	&target_core_dev_attrib_queue_depth.attr,
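The DEF_DEV_ATTRIB()/SE_DEV_ATTR() pair plus the entry in target_core_dev_attrib_attrs[] is what exposes the new attribute in the per-device attrib configfs group, so that a write to it ends up in se_dev_set_fabric_max_sectors(). A rough, hedged sketch of that store path (the real wrapper is generated by the macro; the function name and parsing details below are illustrative only):

    /* Sketch of the store path only -- not the macro's literal expansion. */
    static ssize_t fabric_max_sectors_store_sketch(struct se_device *dev,
                                                   const char *page, size_t count)
    {
        unsigned long val;
        int ret;

        ret = kstrtoul(page, 0, &val);  /* parse the value written via configfs */
        if (ret < 0)
            return ret;

        ret = se_dev_set_fabric_max_sectors(dev, (u32)val);
        return ret ? ret : count;
    }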
@@ -889,10 +889,15 @@ void se_dev_set_default_attribs(
 		limits->logical_block_size);
 	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
 	/*
-	 * Set optimal_sectors from max_sectors, which can be lowered via
-	 * configfs.
+	 * Set fabric_max_sectors, which is reported in block limits
+	 * VPD page (B0h).
 	 */
-	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
+	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+	/*
+	 * Set optimal_sectors from fabric_max_sectors, which can be
+	 * lowered via configfs.
+	 */
+	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 	/*
 	 * queue_depth is based on subsystem plugin dependent requirements.
 	 */
@@ -1224,6 +1229,54 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
 	return 0;
 }

+int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" fabric_max_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (!fabric_max_sectors) {
+		pr_err("dev[%p]: Illegal ZERO value for"
+			" fabric_max_sectors\n", dev);
+		return -EINVAL;
+	}
+	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
+			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
+			DA_STATUS_MAX_SECTORS_MIN);
+		return -EINVAL;
+	}
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors:"
+				" %u\n", dev, fabric_max_sectors,
+				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+			return -EINVAL;
+		}
+	} else {
+		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+				" greater than DA_STATUS_MAX_SECTORS_MAX:"
+				" %u\n", dev, fabric_max_sectors,
+				DA_STATUS_MAX_SECTORS_MAX);
+			return -EINVAL;
+		}
+	}
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
+			dev->se_sub_dev->se_dev_attrib.block_size);
+
+	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+			dev, fabric_max_sectors);
+	return 0;
+}
+
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
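se_dev_align_max_sectors() already exists in this file; the idea behind the "align down to PAGE_SIZE" comment is to trim the sector count so that the resulting byte length is a whole number of pages, which is what transport_allocate_data_tasks() expects. A standalone sketch of that rounding, assuming a 4 KiB page and a block size that divides it (both are assumptions of the example, not a description of the real helper):

    #include <stdint.h>

    #define SKETCH_PAGE_SIZE 4096u  /* assumption: PAGE_SIZE is arch-dependent */

    /* Round max_sectors down so max_sectors * block_size is page-aligned. */
    static uint32_t align_max_sectors_sketch(uint32_t max_sectors, uint32_t block_size)
    {
        uint32_t sectors_per_page = SKETCH_PAGE_SIZE / block_size;

        if (sectors_per_page <= 1)
            return max_sectors;     /* block >= page: nothing to trim */

        return max_sectors - (max_sectors % sectors_per_page);
    }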
@@ -1237,10 +1290,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 			" changed for TCM/pSCSI\n", dev);
 		return -EINVAL;
 	}
-	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
+	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
 		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-			" greater than max_sectors: %u\n", dev,
-			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
+			" greater than fabric_max_sectors: %u\n", dev,
+			optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
 		return -EINVAL;
 	}

@@ -53,6 +53,7 @@ int se_dev_set_is_nonrot(struct se_device *, int);
 int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 int se_dev_set_queue_depth(struct se_device *, u32);
 int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_fabric_max_sectors(struct se_device *, u32);
 int se_dev_set_optimal_sectors(struct se_device *, u32);
 int se_dev_set_block_size(struct se_device *, u32);
 struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
@@ -37,6 +37,7 @@
 #include <linux/in.h>
 #include <linux/cdrom.h>
 #include <linux/module.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
@@ -3107,6 +3108,13 @@ static int transport_generic_cmd_sequencer(
 		cmd->data_length = size;
 	}

+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
+	    sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
+				cdb[0], sectors);
+		goto out_invalid_cdb_field;
+	}
+
 	/* reject any command that we don't have a handler for */
 	if (!(passthrough || cmd->execute_task ||
 	      (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
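The sectors value tested above was decoded earlier in the sequencer from the CDB's transfer length, so oversized commands are now bounced with an invalid-CDB-field response instead of being passed to the back-end unchecked. As a self-contained illustration of what gets rejected: for a READ(10) CDB the transfer length sits in bytes 7-8 (big-endian). The helper names below are invented for the example:

    #include <stdint.h>

    /* READ(10): opcode 0x28, transfer length in CDB bytes 7-8 (big-endian). */
    static uint32_t read10_transfer_length(const uint8_t *cdb)
    {
        return ((uint32_t)cdb[7] << 8) | cdb[8];
    }

    /* Non-zero means the command would now fail the new front-end check. */
    static int exceeds_fabric_limit(const uint8_t *cdb, uint32_t fabric_max_sectors)
    {
        return read10_transfer_length(cdb) > fabric_max_sectors;
    }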
@@ -86,6 +86,8 @@
 #define DA_UNMAP_GRANULARITY_DEFAULT	0
 /* Default unmap_granularity_alignment */
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
+/* Default max transfer length */
+#define DA_FABRIC_MAX_SECTORS	8192
 /* Emulation for Direct Page Out */
 #define DA_EMULATE_DPO	0
 /* Emulation for Forced Unit Access WRITEs */
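For scale, and assuming the common 512-byte logical block size (block_size is per-device, so this is only an example), the default of 8192 sectors works out to a 4 MiB maximum transfer:

    /* 8192 sectors * 512 bytes/sector = 4,194,304 bytes = 4 MiB */
    static const unsigned int default_fabric_max_bytes = 8192u * 512u;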
@@ -726,6 +728,7 @@ struct se_dev_attrib {
 	u32		block_size;
 	u32		hw_max_sectors;
 	u32		max_sectors;
+	u32		fabric_max_sectors;
 	u32		optimal_sectors;
 	u32		hw_queue_depth;
 	u32		queue_depth;