Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (26 commits)
  target: Set additional sense length field in sense data
  target: Remove legacy device status check from transport_execute_tasks
  target: Remove __transport_execute_tasks() for each processing context
  target: Remove extra se_device->execute_task_lock access in fast path
  target: Drop se_device TCQ queue_depth usage from I/O path
  target: Fix possible NULL pointer with __transport_execute_tasks
  target: Remove TFO->check_release_cmd() fabric API caller
  tcm_fc: Convert ft_send_work to use target_submit_cmd
  target: Add target_submit_cmd() for process context fabric submission
  target: Make target_put_sess_cmd use target_release_cmd_kref
  target: Set response format in INQUIRY response
  target: tcm_mod_builder: small fixups
  Documentation/target: Fix tcm_mod_builder.py build breakage
  target: remove overagressive ____cacheline_aligned annoations
  tcm_loop: bump max_sectors
  target/configs: remove trailing newline from udev_path and alias
  iscsi-target: fix chap identifier simple_strtoul usage
  target: remove useless casts
  target: simplify target_check_cdb_and_preempt
  target: Move core_scsi3_check_cdb_abort_and_preempt
  ...
Linus Torvalds 2012-01-18 15:59:18 -08:00
commit 4ba3069fea
51 changed files with 828 additions and 1013 deletions
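
Most of the diffstat below comes from the target core header consolidation: fabric modules now include a single <target/target_core_fabric.h> (and backends <target/target_core_backend.h>) in place of the old per-area headers. A minimal, illustrative sketch of the include block a converted fabric module ends up with; the exact set varies per driver and is shown in the individual hunks:

/*
 * <target/target_core_fabric.h> replaces the old target_core_transport.h,
 * target_core_fabric_ops.h, target_core_device.h and target_core_tpg.h
 * headers (assumption: illustrative only; each driver keeps whatever
 * extra headers it still needs).
 */
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>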

View File

@ -230,14 +230,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@ -260,7 +255,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
@ -308,7 +303,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
@ -344,7 +339,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
@ -352,7 +347,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
@ -391,8 +386,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
@ -405,14 +399,12 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
@ -439,9 +431,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
@ -475,9 +467,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
@ -492,17 +484,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
ret = p.write(buf)
if ret:
@ -514,7 +504,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
@ -579,11 +569,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@ -788,7 +774,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
@ -815,7 +801,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
@ -899,13 +885,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
@ -948,15 +927,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:

View File

@ -27,8 +27,7 @@
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np(
sock_in6 = (struct sockaddr_in6 *)sockaddr;
sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
(void *)&sock_in6_e->sin6_addr.in6_u,
if (!memcmp(&sock_in6->sin6_addr.in6_u,
&sock_in6_e->sin6_addr.in6_u,
sizeof(struct in6_addr)))
ip_match = 1;
@ -1225,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf(
crypto_hash_init(hash);
sg_init_one(&sg, (u8 *)buf, payload_length);
sg_init_one(&sg, buf, payload_length);
crypto_hash_update(hash, &sg, payload_length);
if (padding) {
@ -1603,7 +1602,7 @@ static int iscsit_handle_nop_out(
/*
* Attach ping data to struct iscsi_cmd->buf_ptr.
*/
cmd->buf_ptr = (void *)ping_data;
cmd->buf_ptr = ping_data;
cmd->buf_ptr_size = payload_length;
pr_debug("Got %u bytes of NOPOUT ping"
@ -3197,7 +3196,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
memcpy((void *)payload + payload_len, buf, len);
memcpy(payload + payload_len, buf, len);
payload_len += len;
spin_lock(&tiqn->tiqn_tpg_lock);
@ -3229,7 +3228,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
memcpy((void *)payload + payload_len, buf, len);
memcpy(payload + payload_len, buf, len);
payload_len += len;
}
spin_unlock(&tpg->tpg_np_lock);
@ -3486,7 +3485,7 @@ int iscsi_target_tx_thread(void *arg)
struct iscsi_conn *conn;
struct iscsi_queue_req *qr = NULL;
struct se_cmd *se_cmd;
struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
struct iscsi_thread_set *ts = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
@ -3775,7 +3774,7 @@ int iscsi_target_rx_thread(void *arg)
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
struct iscsi_conn *conn = NULL;
struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
struct iscsi_thread_set *ts = arg;
struct kvec iov;
/*
* Allow ourselves to be interrupted by SIGINT so that a

View File

@ -82,7 +82,7 @@ static void chap_gen_challenge(
unsigned int *c_len)
{
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
struct iscsi_chap *chap = conn->auth_protocol;
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
@ -120,7 +120,7 @@ static struct iscsi_chap *chap_server_open(
if (!conn->auth_protocol)
return NULL;
chap = (struct iscsi_chap *) conn->auth_protocol;
chap = conn->auth_protocol;
/*
* We only support MD5 MDA presently.
*/
@ -165,14 +165,15 @@ static int chap_server_compute_md5(
unsigned int *nr_out_len)
{
char *endptr;
unsigned char id, digest[MD5_SIGNATURE_SIZE];
unsigned long id;
unsigned char digest[MD5_SIGNATURE_SIZE];
unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
unsigned char identifier[10], *challenge = NULL;
unsigned char *challenge_binhex = NULL;
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
struct scatterlist sg;
@ -246,7 +247,7 @@ static int chap_server_compute_md5(
goto out;
}
sg_init_one(&sg, (void *)&chap->id, 1);
sg_init_one(&sg, &chap->id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@ -254,7 +255,7 @@ static int chap_server_compute_md5(
goto out;
}
sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
sg_init_one(&sg, &auth->password, strlen(auth->password));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
if (ret < 0) {
pr_err("crypto_hash_update() failed for password\n");
@ -262,7 +263,7 @@ static int chap_server_compute_md5(
goto out;
}
sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
if (ret < 0) {
pr_err("crypto_hash_update() failed for challenge\n");
@ -305,14 +306,17 @@ static int chap_server_compute_md5(
}
if (type == HEX)
id = (unsigned char)simple_strtoul((char *)&identifier[2],
&endptr, 0);
id = simple_strtoul(&identifier[2], &endptr, 0);
else
id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
id = simple_strtoul(identifier, &endptr, 0);
if (id > 255) {
pr_err("chap identifier: %lu greater than 255\n", id);
goto out;
}
/*
* RFC 1994 says Identifier is no more than octet (8 bits).
*/
pr_debug("[server] Got CHAP_I=%d\n", id);
pr_debug("[server] Got CHAP_I=%lu\n", id);
/*
* Get CHAP_C.
*/
@ -351,7 +355,7 @@ static int chap_server_compute_md5(
goto out;
}
sg_init_one(&sg, (void *)&id, 1);
sg_init_one(&sg, &id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@ -359,7 +363,7 @@ static int chap_server_compute_md5(
goto out;
}
sg_init_one(&sg, (void *)auth->password_mutual,
sg_init_one(&sg, auth->password_mutual,
strlen(auth->password_mutual));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
if (ret < 0) {
@ -371,7 +375,7 @@ static int chap_server_compute_md5(
/*
* Convert received challenge to binary hex.
*/
sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
sg_init_one(&sg, challenge_binhex, challenge_len);
ret = crypto_hash_update(&desc, &sg, challenge_len);
if (ret < 0) {
pr_err("crypto_hash_update() failed for ma challenge\n");
@ -414,7 +418,7 @@ static int chap_got_response(
char *nr_out_ptr,
unsigned int *nr_out_len)
{
struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
struct iscsi_chap *chap = conn->auth_protocol;
switch (chap->digest_type) {
case CHAP_DIGEST_MD5:
@ -437,7 +441,7 @@ u32 chap_main_loop(
int *in_len,
int *out_len)
{
struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
struct iscsi_chap *chap = conn->auth_protocol;
if (!chap) {
chap = chap_server_open(conn, auth, in_text, out_text, out_len);
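
The chap_server_compute_md5() hunks above parse CHAP_I into an unsigned long and add an explicit range check, so an out-of-range identifier is rejected instead of being silently truncated by the old unsigned char cast. A minimal standalone sketch of that parsing pattern; parse_chap_i() is a hypothetical helper, not part of the real iscsi_target_auth.c:

#include <linux/kernel.h>	/* simple_strtoul() */
#include <linux/errno.h>

/*
 * Hypothetical helper mirroring the fix above: parse into an unsigned
 * long first, then enforce the RFC 1994 one-octet limit.
 */
static int parse_chap_i(const char *identifier, unsigned char *out_id)
{
	char *endptr;
	unsigned long id;

	id = simple_strtoul(identifier, &endptr, 0);
	if (id > 255)
		return -EINVAL;		/* CHAP Identifier is a single octet */

	*out_id = (unsigned char)id;
	return 0;
}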

View File

@ -22,12 +22,8 @@
#include <linux/configfs.h>
#include <linux/export.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_fabric_lib.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
@ -56,8 +52,7 @@ struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
struct iscsi_portal_group *tpg =
(struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
int ret;
if (!tpg) {
@ -1225,7 +1220,7 @@ struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
wwn, &tpg->tpg_se_tpg, (void *)tpg,
wwn, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return NULL;

View File

@ -21,8 +21,7 @@
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"

View File

@ -21,7 +21,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"

View File

@ -21,7 +21,7 @@
#include <linux/list.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"

View File

@ -21,7 +21,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_datain_values.h"

View File

@ -23,7 +23,7 @@
#include <linux/crypto.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_tq.h"
@ -143,7 +143,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess_p = se_sess->fabric_sess_ptr;
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
@ -151,9 +151,9 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_unlock(&sess_p->conn_lock);
continue;
}
if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
(!strcmp((void *)sess_p->sess_ops->InitiatorName,
(void *)initiatorname_param->value) &&
if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
(!strcmp(sess_p->sess_ops->InitiatorName,
initiatorname_param->value) &&
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
spin_unlock(&sess_p->conn_lock);
@ -229,7 +229,7 @@ static int iscsi_login_zero_tsih_s1(
iscsi_login_set_conn_values(sess, conn, pdu->cid);
sess->init_task_tag = pdu->itt;
memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = pdu->cmdsn;
INIT_LIST_HEAD(&sess->sess_conn_list);
INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
@ -440,8 +440,7 @@ static int iscsi_login_non_zero_tsih_s2(
atomic_read(&sess_p->session_logout) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp((const void *)sess_p->isid,
(const void *)pdu->isid, 6) &&
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
(sess_p->tsih == pdu->tsih)) {
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@ -654,7 +653,7 @@ static int iscsi_post_login_handler(
spin_lock_bh(&se_tpg->session_lock);
__transport_register_session(&sess->tpg->tpg_se_tpg,
se_sess->se_node_acl, se_sess, (void *)sess);
se_sess->se_node_acl, se_sess, sess);
pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
sess->session_state = TARG_SESS_STATE_LOGGED_IN;
@ -811,7 +810,7 @@ int iscsi_target_setup_login_socket(
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
*/
memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
memcpy(&np->np_sockaddr, sockaddr,
sizeof(struct __kernel_sockaddr_storage));
if (sockaddr->ss_family == AF_INET6)
@ -821,6 +820,7 @@ int iscsi_target_setup_login_socket(
/*
* Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
*/
/* FIXME: Someone please explain why this is endian-safe */
opt = 1;
if (np->np_network_transport == ISCSI_TCP) {
ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
@ -832,6 +832,7 @@ int iscsi_target_setup_login_socket(
}
}
/* FIXME: Someone please explain why this is endian-safe */
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(char *)&opt, sizeof(opt));
if (ret < 0) {
@ -1206,7 +1207,7 @@ out:
int iscsi_target_login_thread(void *arg)
{
struct iscsi_np *np = (struct iscsi_np *)arg;
struct iscsi_np *np = arg;
int ret;
allow_signal(SIGINT);

View File

@ -21,7 +21,7 @@
#include <linux/ctype.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_tpg.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@ -732,7 +732,7 @@ static void iscsi_initiatorname_tolower(
u32 iqn_size = strlen(param_buf), i;
for (i = 0; i < iqn_size; i++) {
c = (char *)&param_buf[i];
c = &param_buf[i];
if (!isupper(*c))
continue;

View File

@ -19,7 +19,6 @@
******************************************************************************/
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
@ -135,7 +134,7 @@ extern int iscsit_na_nopin_timeout(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list,

View File

@ -23,7 +23,6 @@
#include <linux/export.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/configfs_macros.h>
#include "iscsi_target_core.h"
@ -746,7 +745,7 @@ static ssize_t iscsi_stat_sess_show_attr_node(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
@ -770,7 +769,7 @@ static ssize_t iscsi_stat_sess_show_attr_indx(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->session_index);
@ -794,7 +793,7 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
}
@ -817,7 +816,7 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
}
@ -840,7 +839,7 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%llu\n",
(unsigned long long)sess->tx_data_octets);
@ -864,7 +863,7 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%llu\n",
(unsigned long long)sess->rx_data_octets);
@ -888,7 +887,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->conn_digest_errors);
@ -912,7 +911,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->conn_timeout_errors);

View File

@ -21,7 +21,7 @@
#include <asm/unaligned.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"

View File

@ -19,10 +19,8 @@
******************************************************************************/
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/target_core_tpg.h>
#include "iscsi_target_core.h"
#include "iscsi_target_erl0.h"
@ -72,7 +70,7 @@ int iscsit_load_discovery_tpg(void)
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
NULL, &tpg->tpg_se_tpg, (void *)tpg,
NULL, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_DISCOVERY);
if (ret < 0) {
kfree(tpg);

View File

@ -22,9 +22,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_tmr.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "iscsi_target_core.h"
@ -289,7 +287,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
}
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
(void *)cmd->tmr_req, tcm_function,
cmd->tmr_req, tcm_function,
GFP_KERNEL);
if (!se_cmd->se_tmr_req)
goto out;
@ -1066,7 +1064,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
if (tiqn) {
spin_lock_bh(&tiqn->sess_err_stats.lock);
strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
(void *)conn->sess->sess_ops->InitiatorName);
conn->sess->sess_ops->InitiatorName);
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;

View File

@ -33,14 +33,9 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_fabric_lib.h>
#include <target/target_core_configfs.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_tmr.h>
#include "tcm_loop.h"
@ -421,11 +416,11 @@ static struct scsi_host_template tcm_loop_driver_template = {
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
.eh_device_reset_handler = tcm_loop_device_reset,
.can_queue = TL_SCSI_CAN_QUEUE,
.can_queue = 1024,
.this_id = -1,
.sg_tablesize = TL_SCSI_SG_TABLESIZE,
.cmd_per_lun = TL_SCSI_CMD_PER_LUN,
.max_sectors = TL_SCSI_MAX_SECTORS,
.sg_tablesize = 256,
.cmd_per_lun = 1024,
.max_sectors = 0xFFFF,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = tcm_loop_slave_alloc,
.slave_configure = tcm_loop_slave_configure,
@ -564,8 +559,7 @@ static char *tcm_loop_get_fabric_name(void)
static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
struct tcm_loop_tpg *tl_tpg =
(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
/*
* tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
@ -592,8 +586,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
struct tcm_loop_tpg *tl_tpg =
(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* Return the passed NAA identifier for the SAS Target Port
*/
@ -602,8 +595,7 @@ static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
struct tcm_loop_tpg *tl_tpg =
(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
@ -623,8 +615,7 @@ static u32 tcm_loop_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
struct tcm_loop_tpg *tl_tpg =
(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
@ -653,8 +644,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
struct t10_pr_registration *pr_reg,
int *format_code)
{
struct tcm_loop_tpg *tl_tpg =
(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
@ -687,8 +677,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
u32 *out_tid_len,
char **port_nexus_ptr)
{
struct tcm_loop_tpg *tl_tpg =
(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {

View File

@ -1,16 +1,7 @@
#define TCM_LOOP_VERSION "v2.1-rc1"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
/*
* Defaults for struct scsi_host_template tcm_loop_driver_template
*
* We use large can_queue and cmd_per_lun here and let TCM enforce
* the underlying se_device_t->queue_depth.
*/
#define TL_SCSI_CAN_QUEUE 1024
#define TL_SCSI_CMD_PER_LUN 1024
#define TL_SCSI_MAX_SECTORS 1024
#define TL_SCSI_SG_TABLESIZE 256
/*
* Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
*/
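
The two tcm_loop hunks above drop the TL_SCSI_* wrapper macros and raise the host template limits in place (the "tcm_loop: bump max_sectors" commit in the list at the top). Shown here in isolation as a sketch, only the fields those hunks touch:

#include <scsi/scsi_host.h>

/*
 * Sketch of just the scsi_host_template limits changed above (assumption:
 * every other field of the real tcm_loop_driver_template is unchanged).
 */
static struct scsi_host_template tcm_loop_limits_sketch = {
	.can_queue	= 1024,
	.sg_tablesize	= 256,
	.cmd_per_lun	= 1024,
	.max_sectors	= 0xFFFF,
};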

View File

@ -32,13 +32,12 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_ua.h"
static int core_alua_check_transition(int state, int *primary);

View File

@ -29,10 +29,11 @@
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_cdb.h"
static void
target_fill_alua_data(struct se_port *port, unsigned char *buf)
@ -93,6 +94,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
}
buf[2] = dev->transport->get_device_rev(dev);
/*
* NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
*
* SPC4 says:
* A RESPONSE DATA FORMAT field set to 2h indicates that the
* standard INQUIRY data is in the format defined in this
* standard. Response data format values less than 2h are
* obsolete. Response data format values greater than 2h are
* reserved.
*/
buf[3] = 2;
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
@ -115,11 +128,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
goto out;
}
snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
snprintf((unsigned char *)&buf[16], 16, "%s",
&dev->se_sub_dev->t10_wwn.model[0]);
snprintf((unsigned char *)&buf[32], 4, "%s",
&dev->se_sub_dev->t10_wwn.revision[0]);
snprintf(&buf[8], 8, "LIO-ORG");
snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
buf[4] = 31; /* Set additional length to 31 */
out:
@ -138,8 +149,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
SDF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
unit_serial_len =
strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
@ -148,8 +158,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
buf[3] = (len & 0xff);
return 0;
}
len += sprintf((unsigned char *)&buf[4], "%s",
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
len += sprintf(&buf[4], "%s",
dev->se_sub_dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@ -279,14 +289,13 @@ check_t10_vend_desc:
len += (prod_len + unit_serial_len);
goto check_port;
}
id_len += sprintf((unsigned char *)&buf[off+12],
"%s:%s", prod,
id_len += sprintf(&buf[off+12], "%s:%s", prod,
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
memcpy(&buf[off+4], "LIO-ORG", 8);
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
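
The target_core_cdb.c hunks above set RESPONSE DATA FORMAT to 2h (SPC-4 treats lower values as obsolete) and drop the redundant unsigned char casts around the vendor/model/revision strings. A simplified sketch of the standard INQUIRY bytes involved, as an assumption-laden stand-in for the real target_emulate_inquiry_std():

#include <linux/kernel.h>	/* snprintf() */

/*
 * Sketch of the fixed standard INQUIRY fields (assumption: buf points to
 * a zeroed 36+ byte response buffer filled in by the caller).
 */
static void fill_std_inquiry_sketch(char *buf, const char *model,
				    const char *revision)
{
	buf[3] = 2;				/* RESPONSE DATA FORMAT = 2h */
	buf[4] = 31;				/* ADDITIONAL LENGTH */
	snprintf(&buf[8], 8, "LIO-ORG");	/* T10 VENDOR IDENTIFICATION */
	snprintf(&buf[16], 16, "%s", model);	/* PRODUCT IDENTIFICATION */
	snprintf(&buf[32], 4, "%s", revision);	/* PRODUCT REVISION LEVEL */
}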

View File

@ -1,14 +0,0 @@
#ifndef TARGET_CORE_CDB_H
#define TARGET_CORE_CDB_H
int target_emulate_inquiry(struct se_task *task);
int target_emulate_readcapacity(struct se_task *task);
int target_emulate_readcapacity_16(struct se_task *task);
int target_emulate_modesense(struct se_task *task);
int target_emulate_request_sense(struct se_task *task);
int target_emulate_unmap(struct se_task *task);
int target_emulate_write_same(struct se_task *task);
int target_emulate_synchronize_cache(struct se_task *task);
int target_emulate_noop(struct se_task *task);
#endif /* TARGET_CORE_CDB_H */

View File

@ -39,18 +39,16 @@
#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_stat.h"
extern struct t10_alua_lu_gp *default_lu_gp;
@ -1452,7 +1450,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@ -1631,7 +1629,7 @@ static struct config_item_type target_core_dev_pr_cit = {
static ssize_t target_core_show_dev_info(void *p, char *page)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
int bl = 0;
@ -1659,7 +1657,7 @@ static ssize_t target_core_store_dev_control(
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
@ -1682,7 +1680,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *se_dev = p;
if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
return 0;
@ -1695,7 +1693,7 @@ static ssize_t target_core_store_dev_alias(
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
@ -1710,6 +1708,9 @@ static ssize_t target_core_store_dev_alias(
read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
"%s", page);
if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
se_dev->se_dev_alias[read_bytes - 1] = '\0';
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
@ -1728,7 +1729,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *se_dev = p;
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
return 0;
@ -1741,7 +1742,7 @@ static ssize_t target_core_store_dev_udev_path(
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
@ -1756,6 +1757,9 @@ static ssize_t target_core_store_dev_udev_path(
read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
@ -1777,7 +1781,7 @@ static ssize_t target_core_store_dev_enable(
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *se_dev = p;
struct se_device *dev;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
@ -1822,7 +1826,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
struct se_device *dev;
struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *su_dev = p;
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
@ -1860,7 +1864,7 @@ static ssize_t target_core_store_alua_lu_gp(
size_t count)
{
struct se_device *dev;
struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
struct se_subsystem_dev *su_dev = p;
struct se_hba *hba = su_dev->se_dev_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;

View File

@ -42,13 +42,11 @@
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@ -1134,8 +1132,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
u32 orig_queue_depth = dev->queue_depth;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
@ -1169,11 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
if (queue_depth > orig_queue_depth)
atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
else if (queue_depth < orig_queue_depth)
atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;

View File

@ -36,18 +36,14 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_stat.h"
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \

View File

@ -34,13 +34,10 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_lib.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_hba.h"
#include "target_core_internal.h"
#include "target_core_pr.h"
/*
@ -402,7 +399,7 @@ char *iscsi_parse_pr_out_transport_id(
add_len = ((buf[2] >> 8) & 0xff);
add_len |= (buf[3] & 0xff);
tid_len = strlen((char *)&buf[4]);
tid_len = strlen(&buf[4]);
tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
tid_len += 1; /* Add one byte for NULL terminator */
padding = ((-tid_len) & 3);
@ -423,11 +420,11 @@ char *iscsi_parse_pr_out_transport_id(
* format.
*/
if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x");
p = strstr(&buf[4], ",i,0x");
if (!p) {
pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
(char *)&buf[4]);
&buf[4]);
return NULL;
}
*p = '\0'; /* Terminate iSCSI Name */

View File

@ -37,8 +37,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_backend.h>
#include "target_core_file.h"
@ -86,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
struct fd_host *fd_host = hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!fd_dev) {
@ -114,8 +113,8 @@ static struct se_device *fd_create_virtdevice(
struct se_device *dev;
struct se_dev_limits dev_limits;
struct queue_limits *limits;
struct fd_dev *fd_dev = (struct fd_dev *) p;
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
struct fd_dev *fd_dev = p;
struct fd_host *fd_host = hba->hba_ptr;
mm_segment_t old_fs;
struct file *file;
struct inode *inode = NULL;
@ -240,7 +239,7 @@ fail:
*/
static void fd_free_device(void *p)
{
struct fd_dev *fd_dev = (struct fd_dev *) p;
struct fd_dev *fd_dev = p;
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
@ -498,7 +497,7 @@ static ssize_t fd_set_configfs_dev_params(
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@ -559,7 +558,7 @@ out:
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");

View File

@ -37,11 +37,10 @@
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_hba.h"
#include "target_core_internal.h"
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);

View File

@ -1,7 +0,0 @@
#ifndef TARGET_CORE_HBA_H
#define TARGET_CORE_HBA_H
extern struct se_hba *core_alloc_hba(const char *, u32, u32);
extern int core_delete_hba(struct se_hba *);
#endif /* TARGET_CORE_HBA_H */

View File

@ -42,8 +42,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_backend.h>
#include "target_core_iblock.h"
@ -391,7 +390,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@ -465,7 +464,7 @@ static ssize_t iblock_show_configfs_dev_params(
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
"" : (bd->bd_holder == ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");

View File

@ -0,0 +1,123 @@
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
/* target_core_cdb.c */
int target_emulate_inquiry(struct se_task *task);
int target_emulate_readcapacity(struct se_task *task);
int target_emulate_readcapacity_16(struct se_task *task);
int target_emulate_modesense(struct se_task *task);
int target_emulate_request_sense(struct se_task *task);
int target_emulate_unmap(struct se_task *task);
int target_emulate_write_same(struct se_task *task);
int target_emulate_synchronize_cache(struct se_task *task);
int target_emulate_noop(struct se_task *task);
/* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
void core_update_device_list_access(u32, u32, struct se_node_acl *);
int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
u32, u32, struct se_node_acl *, struct se_portal_group *, int);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
int target_report_luns(struct se_task *);
void se_release_device_for_hba(struct se_device *);
void se_release_vpd_for_dev(struct se_device *);
int se_free_virtual_device(struct se_device *, struct se_hba *);
int se_dev_check_online(struct se_device *);
int se_dev_check_shutdown(struct se_device *);
void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
int se_dev_set_emulate_write_cache(struct se_device *, int);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
int se_dev_set_emulate_tas(struct se_device *, int);
int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
struct se_device *, u32);
int core_dev_del_lun(struct se_portal_group *, u32);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
u32, char *, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun *, struct se_lun_acl *);
void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
void core_dev_release_virtual_lun0(void);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
int core_delete_hba(struct se_hba *);
/* target_core_tmr.c */
int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
/* target_core_tpg.c */
extern struct se_device *g_lun0_dev;
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
u32, void *);
struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
/* target_core_transport.c */
extern struct kmem_cache *se_tmr_req_cache;
int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
void transport_cmd_finish_abort(struct se_cmd *, int);
void __transport_remove_task_from_execute_queue(struct se_task *,
struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
unsigned long long, char *, int *);
void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_task(struct se_task *task, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
#endif /* TARGET_CORE_INTERNAL_H */

View File

@ -33,14 +33,11 @@
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_hba.h"
#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@ -2984,21 +2981,6 @@ static void core_scsi3_release_preempt_and_abort(
}
}
int core_scsi3_check_cdb_abort_and_preempt(
struct list_head *preempt_and_abort_list,
struct se_cmd *cmd)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
pr_reg_abort_list) {
if (pr_reg->pr_res_key == cmd->pr_res_key)
return 0;
}
return 1;
}
static int core_scsi3_pro_preempt(
struct se_cmd *cmd,
int type,

View File

@ -60,8 +60,6 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
struct se_cmd *);
extern int target_scsi3_emulate_pr_in(struct se_task *task);
extern int target_scsi3_emulate_pr_out(struct se_task *task);

View File

@ -44,8 +44,7 @@
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_backend.h>
#include "target_core_pscsi.h"
@ -105,7 +104,7 @@ static void pscsi_detach_hba(struct se_hba *hba)
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
/*
* Release the struct Scsi_Host
@ -351,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
* scsi_device_put() and the pdv->pdv_sd cleared.
*/
pdv->pdv_sd = sd;
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
@ -406,7 +404,7 @@ static struct se_device *pscsi_create_type_disk(
__releases(sh->host_lock)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
u32 dev_flags = 0;
@ -454,7 +452,7 @@ static struct se_device *pscsi_create_type_rom(
__releases(sh->host_lock)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
@ -489,7 +487,7 @@ static struct se_device *pscsi_create_type_other(
__releases(sh->host_lock)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
@ -510,10 +508,10 @@ static struct se_device *pscsi_create_virtdevice(
struct se_subsystem_dev *se_dev,
void *p)
{
struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
struct pscsi_dev_virt *pdv = p;
struct se_device *dev;
struct scsi_device *sd;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
@ -818,7 +816,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@ -1144,7 +1142,7 @@ static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
return (unsigned char *)&pt->pscsi_sense[0];
return pt->pscsi_sense;
}
/* pscsi_get_device_rev():

View File

@ -37,9 +37,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include "target_core_rd.h"
@ -474,7 +472,7 @@ static ssize_t rd_set_configfs_dev_params(
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;

View File

@ -43,12 +43,12 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_hba.h"
#include "target_core_internal.h"
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@ -1755,8 +1755,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
(unsigned char *)&buf[0], 64);
tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
spin_unlock_irq(&nacl->nacl_sess_lock);

View File

@ -1,8 +0,0 @@
#ifndef TARGET_CORE_STAT_H
#define TARGET_CORE_STAT_H
extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
extern void target_stat_setup_port_default_groups(struct se_lun *);
extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
#endif /*** TARGET_CORE_STAT_H ***/

View File

@ -32,12 +32,11 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
@ -101,6 +100,21 @@ static void core_tmr_handle_tas_abort(
transport_cmd_finish_abort(cmd, 0);
}
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
struct t10_pr_registration *reg;
if (!list)
return 0;
list_for_each_entry(reg, list, pr_reg_abort_list) {
if (reg->pr_res_key == cmd->pr_res_key)
return 0;
}
return 1;
}
static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
@ -132,9 +146,7 @@ static void core_tmr_drain_tmr_list(
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
* skip non registration key matching TMRs.
*/
if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
spin_lock(&cmd->t_state_lock);
@ -211,9 +223,7 @@ static void core_tmr_drain_task_list(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
@ -222,7 +232,7 @@ static void core_tmr_drain_task_list(
continue;
list_move_tail(&task->t_state_list, &drain_task_list);
atomic_set(&task->task_state_active, 0);
task->t_state_active = false;
/*
* Remove from task execute list before processing drain_task_list
*/
@ -321,9 +331,7 @@ static void core_tmr_drain_cmd_list(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..

View File

@ -39,13 +39,10 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_hba.h"
#include "target_core_stat.h"
#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

View File

@ -45,16 +45,12 @@
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_cdb.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@ -72,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
@ -212,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type)
return new_index;
}
void transport_init_queue_obj(struct se_queue_obj *qobj)
static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
atomic_set(&qobj->queue_cnt, 0);
INIT_LIST_HEAD(&qobj->qobj_list);
init_waitqueue_head(&qobj->thread_wq);
spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);
void transport_subsystem_check_init(void)
{
@ -426,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
if (task->task_flags & TF_ACTIVE)
continue;
if (!atomic_read(&task->task_state_active))
continue;
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_state_list);
pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
cmd->se_tfo->get_task_tag(cmd), dev, task);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
if (task->t_state_active) {
pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
cmd->se_tfo->get_task_tag(cmd), dev, task);
atomic_set(&task->task_state_active, 0);
atomic_dec(&cmd->t_task_cdbs_ex_left);
list_del(&task->t_state_list);
atomic_dec(&cmd->t_task_cdbs_ex_left);
task->t_state_active = false;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
}
/* transport_cmd_check_stop():
@ -696,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned long flags;
#if 0
pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
cmd->t_task_cdb[0], dev);
#endif
if (dev)
atomic_inc(&dev->depth_left);
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
@ -714,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success)
if (dev && dev->transport->transport_complete) {
if (dev->transport->transport_complete(task) != 0) {
cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
task->task_sense = 1;
task->task_flags |= TF_HAS_SENSE;
success = 1;
}
}
@ -743,13 +732,7 @@ void transport_complete_task(struct se_task *task, int success)
}
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@ -824,7 +807,7 @@ static void __transport_add_task_to_execute_queue(
head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
atomic_inc(&dev->execute_tasks);
if (atomic_read(&task->task_state_active))
if (task->t_state_active)
return;
/*
* Determine if this task needs to go to HEAD_OF_QUEUE for the
@ -838,7 +821,7 @@ static void __transport_add_task_to_execute_queue(
else
list_add_tail(&task->t_state_list, &dev->state_task_list);
atomic_set(&task->task_state_active, 1);
task->t_state_active = true;
pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
@ -853,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
if (atomic_read(&task->task_state_active))
continue;
spin_lock(&dev->execute_task_lock);
list_add_tail(&task->t_state_list, &dev->state_task_list);
atomic_set(&task->task_state_active, 1);
pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
task->task_se_cmd->se_tfo->get_task_tag(
task->task_se_cmd), task, dev);
if (!task->t_state_active) {
list_add_tail(&task->t_state_list,
&dev->state_task_list);
task->t_state_active = true;
pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
task->task_se_cmd->se_tfo->get_task_tag(
task->task_se_cmd), task, dev);
}
spin_unlock(&dev->execute_task_lock);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_task *task, *task_prev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
if (!list_empty(&task->t_execute_list))
continue;
@ -886,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
__transport_add_task_to_execute_queue(task, task_prev, dev);
task_prev = task;
}
}
static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
unsigned long flags;
struct se_device *dev = cmd->se_dev;
spin_lock_irqsave(&dev->execute_task_lock, flags);
__transport_add_tasks_from_cmd(cmd);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
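The new wrapper follows the usual locked/unlocked split: __transport_add_tasks_from_cmd() expects the caller to hold dev->execute_task_lock, which is what lets __transport_execute_tasks() below add and drain tasks under a single lock acquisition, while transport_add_tasks_from_cmd() stays the standalone entry point that takes the lock itself. A minimal sketch of that convention, with hypothetical foo_* names (not part of the patch):
struct foo_item {
        struct list_head node;
};
struct foo_dev {
        spinlock_t lock;
        struct list_head queue;
};
/* __foo_enqueue(): caller must already hold dev->lock */
static void __foo_enqueue(struct foo_dev *dev, struct foo_item *item)
{
        list_add_tail(&item->node, &dev->queue);
}
/* foo_enqueue(): standalone variant that takes the lock itself */
static void foo_enqueue(struct foo_dev *dev, struct foo_item *item)
{
        unsigned long flags;
        spin_lock_irqsave(&dev->lock, flags);
        __foo_enqueue(dev, item);
        spin_unlock_irqrestore(&dev->lock, flags);
}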
@ -896,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task,
atomic_dec(&dev->execute_tasks);
}
void transport_remove_task_from_execute_queue(
static void transport_remove_task_from_execute_queue(
struct se_task *task,
struct se_device *dev)
{
@ -983,9 +972,8 @@ void transport_dump_dev_state(
break;
}
*bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
dev->queue_depth);
*bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
atomic_read(&dev->execute_tasks), dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
*bl += sprintf(b + *bl, " ");
@ -1340,9 +1328,6 @@ struct se_device *transport_add_device_to_core_hba(
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
dev->queue_depth = dev_limits->queue_depth;
atomic_set(&dev->depth_left, dev->queue_depth);
atomic_set(&dev->dev_ordered_id, 0);
se_dev_set_default_attribs(dev, dev_limits);
@ -1654,6 +1639,80 @@ int transport_handle_cdb_direct(
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
/**
* target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
*
* @se_cmd: command descriptor to submit
* @se_sess: associated se_sess for endpoint
* @cdb: pointer to SCSI CDB
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
* @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
*
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
**/
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
struct se_portal_group *se_tpg;
int rc;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
BUG_ON(in_interrupt());
/*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
* target_core_fabric_ops->queue_status() callback
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
data_length, data_dir, task_attr, sense);
/*
* Obtain struct se_cmd->cmd_kref reference and add new cmd to
* se_sess->sess_cmd_list. A second kref_get here is necessary
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
/*
* Signal bidirectional data payloads to target-core
*/
if (flags & TARGET_SCF_BIDI_OP)
se_cmd->se_cmd_flags |= SCF_BIDI;
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
goto out_check_cond;
/*
* Sanitize CDBs via transport_generic_cmd_sequencer() and
* allocate the necessary tasks to complete the received CDB+data
*/
rc = transport_generic_allocate_tasks(se_cmd, cdb);
if (rc != 0)
goto out_check_cond;
/*
* Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
* for immediate execution of READs, otherwise wait for
* transport_generic_handle_data() to be called for WRITEs
* when fabric has filled the incoming buffer.
*/
transport_handle_cdb_direct(se_cmd);
return 0;
out_check_cond:
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
return 0;
}
EXPORT_SYMBOL(target_submit_cmd);
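For orientation, the whole fabric dispatch path collapses to a single call with this API. The sketch below is illustrative and not part of the patch; struct my_cmd, my_fabric_dispatch() and their fields are hypothetical stand-ins for a fabric's per-command state:
/* Hypothetical fabric per-command descriptor (illustrative only) */
struct my_cmd {
        struct se_cmd se_cmd;
        unsigned char cdb[16];
        unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
        u32 unpacked_lun;
        u32 data_len;
        int task_attr;
        int data_dir;
};
static void my_fabric_dispatch(struct my_cmd *cmd, struct se_session *se_sess)
{
        /*
         * Process context only: target_submit_cmd() initializes the
         * embedded se_cmd, takes the cmd_kref reference(s), looks up
         * the LUN, runs the CDB sequencer and kicks off execution.
         * Failures are reported through ->queue_status(), so a zero
         * return does not mean the command succeeded.
         */
        target_submit_cmd(&cmd->se_cmd, se_sess, cmd->cdb, cmd->sense_buf,
                          cmd->unpacked_lun, cmd->data_len, cmd->task_attr,
                          cmd->data_dir, TARGET_SCF_ACK_KREF);
}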
/*
* Used by fabric module frontends defining a TFO->new_cmd_map() caller
* to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
@ -1920,18 +1979,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
static inline int transport_tcq_window_closed(struct se_device *dev)
{
if (dev->dev_tcq_window_closed++ <
PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
} else
msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
return 0;
}
/*
* Called from Fabric Module context from transport_execute_tasks()
*
@ -2014,13 +2061,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
if (se_dev_check_online(cmd->se_dev) != 0) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
transport_generic_request_failure(cmd);
return 0;
}
struct se_device *se_dev = cmd->se_dev;
/*
* Call transport_cmd_check_stop() to see if a fabric exception
* has occurred that prevents execution.
@ -2034,19 +2075,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
if (!add_tasks)
goto execute_tasks;
/*
* This calls transport_add_tasks_from_cmd() to handle
* HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
* (if enabled) in __transport_add_task_to_execute_queue() and
* transport_add_task_check_sam_attr().
* __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
* adds associated se_tasks while holding dev->execute_task_lock
* before I/O dispatch to avoid a double spinlock access.
*/
transport_add_tasks_from_cmd(cmd);
__transport_execute_tasks(se_dev, cmd);
return 0;
}
/*
* Kick the execution queue for the cmd associated struct se_device
* storage object.
*/
execute_tasks:
__transport_execute_tasks(cmd->se_dev);
__transport_execute_tasks(se_dev, NULL);
return 0;
}
@ -2056,24 +2094,18 @@ execute_tasks:
*
* Called from transport_processing_thread()
*/
static int __transport_execute_tasks(struct se_device *dev)
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
int error;
struct se_cmd *cmd = NULL;
struct se_task *task = NULL;
unsigned long flags;
/*
* Check if there is enough room in the device and HBA queue to send
* struct se_tasks to the selected transport.
*/
check_depth:
if (!atomic_read(&dev->depth_left))
return transport_tcq_window_closed(dev);
dev->dev_tcq_window_closed = 0;
spin_lock_irq(&dev->execute_task_lock);
if (new_cmd != NULL)
__transport_add_tasks_from_cmd(new_cmd);
if (list_empty(&dev->execute_task_list)) {
spin_unlock_irq(&dev->execute_task_lock);
return 0;
@ -2083,10 +2115,7 @@ check_depth:
__transport_remove_task_from_execute_queue(task, dev);
spin_unlock_irq(&dev->execute_task_lock);
atomic_dec(&dev->depth_left);
cmd = task->task_se_cmd;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags |= (TF_ACTIVE | TF_SENT);
atomic_inc(&cmd->t_task_cdbs_sent);
@ -2107,10 +2136,10 @@ check_depth:
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
transport_generic_request_failure(cmd);
}
new_cmd = NULL;
goto check_depth;
return 0;
@ -2351,7 +2380,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
if (!task->task_sense)
if (!(task->task_flags & TF_HAS_SENSE))
continue;
if (!dev->transport->get_sense_buffer) {
@ -3345,6 +3374,32 @@ static inline void transport_free_pages(struct se_cmd *cmd)
cmd->t_bidi_data_nents = 0;
}
/**
* transport_release_cmd - free a command
* @cmd: command to free
*
* This routine unconditionally frees a command, and reference counting
* or list removal must be done in the caller.
*/
static void transport_release_cmd(struct se_cmd *cmd)
{
BUG_ON(!cmd->se_tfo);
if (cmd->se_tmr_req)
core_tmr_release_req(cmd->se_tmr_req);
if (cmd->t_task_cdb != cmd->__t_task_cdb)
kfree(cmd->t_task_cdb);
/*
* If this cmd has been setup with target_get_sess_cmd(), drop
* the kref and call ->release_cmd() in kref callback.
*/
if (cmd->check_release != 0) {
target_put_sess_cmd(cmd->se_sess, cmd);
return;
}
cmd->se_tfo->release_cmd(cmd);
}
/**
* transport_put_cmd - release a reference to a command
* @cmd: command to release
@ -3870,33 +3925,6 @@ queue_full:
return 0;
}
/**
* transport_release_cmd - free a command
* @cmd: command to free
*
* This routine unconditionally frees a command, and reference counting
* or list removal must be done in the caller.
*/
void transport_release_cmd(struct se_cmd *cmd)
{
BUG_ON(!cmd->se_tfo);
if (cmd->se_tmr_req)
core_tmr_release_req(cmd->se_tmr_req);
if (cmd->t_task_cdb != cmd->__t_task_cdb)
kfree(cmd->t_task_cdb);
/*
* Check if target_wait_for_sess_cmds() is expecting to
* release se_cmd directly here..
*/
if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
if (cmd->se_tfo->check_release_cmd(cmd) != 0)
return;
cmd->se_tfo->release_cmd(cmd);
}
EXPORT_SYMBOL(transport_release_cmd);
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@ -3923,11 +3951,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
* @se_sess: session to reference
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
bool ack_kref)
{
unsigned long flags;
kref_init(&se_cmd->cmd_kref);
/*
* Add a second kref if the fabric caller is expecting to handle
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
if (ack_kref == true)
kref_get(&se_cmd->cmd_kref);
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
se_cmd->check_release = 1;
@ -3935,30 +3974,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
}
EXPORT_SYMBOL(target_get_sess_cmd);
/* target_put_sess_cmd - Check for active I/O shutdown or list delete
* @se_sess: session to reference
* @se_cmd: command descriptor to drop
*/
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
WARN_ON(1);
return 0;
return;
}
if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
complete(&se_cmd->cmd_wait_comp);
return 1;
return;
}
list_del(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
return 0;
se_cmd->se_tfo->release_cmd(se_cmd);
}
/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
* @se_sess: session to reference
* @se_cmd: command descriptor to drop
*/
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
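With these two converted, the get/put pair is now a plain kref lifecycle. A rough sketch of the reference flow for a fabric passing ack_kref = true follows; the exact call sites are illustrative, and the assumption that the first reference is dropped through the normal transport_generic_free_cmd() path reflects typical usage rather than anything mandated by the patch:
/* Illustrative reference flow, not part of the patch */
target_get_sess_cmd(se_sess, se_cmd, true);     /* cmd_kref: 1 + 1 for the ack */
/* ... command executes, response is queued to the initiator ... */
transport_generic_free_cmd(se_cmd, 0);          /* drops one reference via
                                                 * transport_release_cmd() ->
                                                 * target_put_sess_cmd()        */
/* fabric acknowledgement path */
target_put_sess_cmd(se_sess, se_cmd);           /* final put: target_release_cmd_kref()
                                                 * unlinks from sess_cmd_list and
                                                 * calls ->release_cmd()        */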
@ -4174,7 +4219,7 @@ check_cond:
static int transport_clear_lun_thread(void *p)
{
struct se_lun *lun = (struct se_lun *)p;
struct se_lun *lun = p;
__transport_clear_lun_from_sessions(lun);
complete(&lun->lun_shutdown_comp);
@ -4353,6 +4398,7 @@ int transport_send_check_condition_and_sense(
case TCM_NON_EXISTENT_LUN:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT NOT SUPPORTED */
@ -4362,6 +4408,7 @@ int transport_send_check_condition_and_sense(
case TCM_SECTOR_COUNT_TOO_MANY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID COMMAND OPERATION CODE */
@ -4370,6 +4417,7 @@ int transport_send_check_condition_and_sense(
case TCM_UNKNOWN_MODE_PAGE:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
@ -4378,6 +4426,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_ABORT_CMD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* BUS DEVICE RESET FUNCTION OCCURRED */
@ -4387,6 +4436,7 @@ int transport_send_check_condition_and_sense(
case TCM_INCORRECT_AMOUNT_OF_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
@ -4397,6 +4447,7 @@ int transport_send_check_condition_and_sense(
case TCM_INVALID_CDB_FIELD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* INVALID FIELD IN CDB */
@ -4405,6 +4456,7 @@ int transport_send_check_condition_and_sense(
case TCM_INVALID_PARAMETER_LIST:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* INVALID FIELD IN PARAMETER LIST */
@ -4413,6 +4465,7 @@ int transport_send_check_condition_and_sense(
case TCM_UNEXPECTED_UNSOLICITED_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
@ -4423,6 +4476,7 @@ int transport_send_check_condition_and_sense(
case TCM_SERVICE_CRC_ERROR:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* PROTOCOL SERVICE CRC ERROR */
@ -4433,6 +4487,7 @@ int transport_send_check_condition_and_sense(
case TCM_SNACK_REJECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* READ ERROR */
@ -4443,6 +4498,7 @@ int transport_send_check_condition_and_sense(
case TCM_WRITE_PROTECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* DATA PROTECT */
buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
/* WRITE PROTECTED */
@ -4451,6 +4507,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* UNIT ATTENTION */
buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@ -4460,6 +4517,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_NOT_READY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* Not Ready */
buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
transport_get_sense_codes(cmd, &asc, &ascq);
@ -4470,6 +4528,7 @@ int transport_send_check_condition_and_sense(
default:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT COMMUNICATION FAILURE */
@ -4545,11 +4604,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
cmd->se_tfo->queue_status(cmd);
}
/* transport_generic_do_tmr():
*
*
*/
int transport_generic_do_tmr(struct se_cmd *cmd)
static int transport_generic_do_tmr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
@ -4597,7 +4652,7 @@ static int transport_processing_thread(void *param)
{
int ret;
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
struct se_device *dev = param;
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@ -4607,8 +4662,6 @@ static int transport_processing_thread(void *param)
goto out;
get_cmd:
__transport_execute_tasks(dev);
cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
if (!cmd)
continue;

View File

@ -30,13 +30,11 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

View File

@ -39,12 +39,8 @@
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@ -367,6 +363,11 @@ static void ft_send_tm(struct ft_cmd *cmd)
struct ft_sess *sess;
u8 tm_func;
transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
cmd->sess->se_sess, 0, DMA_NONE, 0,
&cmd->ft_sense_buffer[0]);
target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
switch (fcp->fc_tm_flags) {
@ -420,7 +421,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
sess = cmd->sess;
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
transport_generic_free_cmd(&cmd->se_cmd, 0);
ft_sess_put(sess);
return;
}
@ -536,7 +536,6 @@ static void ft_send_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
struct se_cmd *se_cmd;
struct fcp_cmnd *fcp;
int data_dir = 0;
u32 data_len;
@ -591,15 +590,6 @@ static void ft_send_work(struct work_struct *work)
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
}
se_cmd = &cmd->se_cmd;
/*
* Initialize struct se_cmd descriptor from target_core_mod
* infrastructure
*/
transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
data_len, data_dir, task_attr,
&cmd->ft_sense_buffer[0]);
/*
* Check for FCP task management flags
*/
@ -607,39 +597,20 @@ static void ft_send_work(struct work_struct *work)
ft_send_tm(cmd);
return;
}
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
&cmd->ft_sense_buffer[0], cmd->lun, data_len,
task_attr, data_dir, 0);
pr_debug("r_ctl %x alloc target_submit_cmd %d\n", fh->fh_r_ctl, ret);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
return;
}
ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
ft_dump_cmd(cmd, __func__);
if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
transport_generic_free_cmd(se_cmd, 0);
return;
}
if (ret == -EINVAL) {
if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
ft_queue_status(se_cmd);
else
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
transport_generic_free_cmd(se_cmd, 0);
return;
}
transport_handle_cdb_direct(se_cmd);
return;
err:

View File

@ -41,12 +41,8 @@
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_fabric_lib.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

View File

@ -48,10 +48,7 @@
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

View File

@ -40,10 +40,7 @@
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

View File

@ -0,0 +1,65 @@
#ifndef TARGET_CORE_BACKEND_H
#define TARGET_CORE_BACKEND_H
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
struct se_subsystem_api {
struct list_head sub_api_list;
char name[16];
struct module *owner;
u8 transport_type;
unsigned int fua_write_emulated : 1;
unsigned int write_cache_emulated : 1;
int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *);
int (*pmode_enable_hba)(struct se_hba *, unsigned long);
void *(*allocate_virtdevice)(struct se_hba *, const char *);
struct se_device *(*create_virtdevice)(struct se_hba *,
struct se_subsystem_dev *, void *);
void (*free_device)(void *);
int (*transport_complete)(struct se_task *task);
struct se_task *(*alloc_task)(unsigned char *cdb);
int (*do_task)(struct se_task *);
int (*do_discard)(struct se_device *, sector_t, u32);
void (*do_sync_cache)(struct se_task *);
void (*free_task)(struct se_task *);
ssize_t (*check_configfs_dev_params)(struct se_hba *,
struct se_subsystem_dev *);
ssize_t (*set_configfs_dev_params)(struct se_hba *,
struct se_subsystem_dev *, const char *, ssize_t);
ssize_t (*show_configfs_dev_params)(struct se_hba *,
struct se_subsystem_dev *, char *);
u32 (*get_device_rev)(struct se_device *);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
unsigned char *(*get_sense_buffer)(struct se_task *);
};
int transport_subsystem_register(struct se_subsystem_api *);
void transport_subsystem_release(struct se_subsystem_api *);
struct se_device *transport_add_device_to_core_hba(struct se_hba *,
struct se_subsystem_api *, struct se_subsystem_dev *, u32,
void *, struct se_dev_limits *, const char *, const char *);
void transport_complete_sync_cache(struct se_cmd *, int);
void transport_complete_task(struct se_task *, int);
void target_get_task_cdb(struct se_task *, unsigned char *);
void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
/* core helpers also used by command snooping in pscsi */
void *transport_kmap_first_data_page(struct se_cmd *);
void transport_kunmap_first_data_page(struct se_cmd *);
#endif /* TARGET_CORE_BACKEND_H */
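The new header collects everything a storage backend needs in one place. A skeleton of a backend written against it might look like the following; the "myblock" name and stub bodies are hypothetical, and most of the ops are omitted:
#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
static int myblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        /* set up hba->hba_ptr, claim backend resources, etc. */
        return 0;
}
static void myblock_detach_hba(struct se_hba *hba)
{
        /* release whatever attach_hba set up */
}
static struct se_subsystem_api myblock_template = {
        .name           = "myblock",
        .owner          = THIS_MODULE,
        .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba     = myblock_attach_hba,
        .detach_hba     = myblock_detach_hba,
        /* create_virtdevice, do_task, etc. omitted for brevity */
};
static int __init myblock_module_init(void)
{
        return transport_subsystem_register(&myblock_template);
}
static void __exit myblock_module_exit(void)
{
        transport_subsystem_release(&myblock_template);
}
module_init(myblock_module_init);
module_exit(myblock_module_exit);
MODULE_LICENSE("GPL");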

View File

@ -10,6 +10,7 @@
#include <net/tcp.h>
#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
@ -34,6 +35,7 @@
#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET 2
#define SPC_ADD_SENSE_LEN_OFFSET 7
#define SPC_ASC_KEY_OFFSET 12
#define SPC_ASCQ_KEY_OFFSET 13
#define TRANSPORT_IQN_LEN 224
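SPC_ADD_SENSE_LEN_OFFSET is what the transport_send_check_condition_and_sense() hunks above use to fill in byte 7 of the fixed-format sense data. As a sanity check of the layout these offsets describe, a hypothetical helper (not part of the patch):
/* Fixed-format sense layout implied by the offsets above (illustrative) */
static void fill_fixed_sense(unsigned char *buf, u8 key, u8 asc, u8 ascq)
{
        buf[0]                        = 0x70;   /* CURRENT ERROR, fixed format */
        buf[SPC_SENSE_KEY_OFFSET]     = key;    /* byte 2 */
        buf[SPC_ADD_SENSE_LEN_OFFSET] = 10;     /* byte 7: 8 + 10 = 18 bytes total */
        buf[SPC_ASC_KEY_OFFSET]       = asc;    /* byte 12 */
        buf[SPC_ASCQ_KEY_OFFSET]      = ascq;   /* byte 13 */
}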
@ -53,6 +55,72 @@
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
/*
* struct se_subsystem_dev->su_dev_flags
*/
#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
#define SDF_USING_UDEV_PATH 0x00000004
#define SDF_USING_ALIAS 0x00000008
/*
* struct se_device->dev_flags
*/
#define DF_READ_ONLY 0x00000001
#define DF_SPC2_RESERVATIONS 0x00000002
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
/* struct se_dev_attrib sanity values */
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT 0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Emulation for Direct Page Out */
#define DA_EMULATE_DPO 0
/* Emulation for Forced Unit Access WRITEs */
#define DA_EMULATE_FUA_WRITE 1
/* Emulation for Forced Unit Access READs */
#define DA_EMULATE_FUA_READ 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL 0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS 1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU 0
/*
* Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
* block/blk-lib.c:blkdev_issue_discard()
*/
#define DA_EMULATE_TPWS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_RESERVATIONS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
/* By default don't report non-rotating (solid state) medium */
#define DA_IS_NONROT 0
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
#define DA_EMULATE_REST_REORD 0
#define SE_MODE_PAGE_BUF 512
/* struct se_hba->hba_flags */
enum hba_flags_table {
HBA_FLAGS_INTERNAL_USE = 0x01,
@ -71,11 +139,12 @@ enum transport_tpg_type_table {
TRANSPORT_TPG_TYPE_DISCOVERY = 1,
};
/* Used for generate timer flags */
/* struct se_task->task_flags */
enum se_task_flags {
TF_ACTIVE = (1 << 0),
TF_SENT = (1 << 1),
TF_REQUEST_STOP = (1 << 2),
TF_HAS_SENSE = (1 << 3),
};
/* Special transport agnostic struct se_cmd->t_states */
@ -158,9 +227,38 @@ enum tcm_sense_reason_table {
TCM_RESERVATION_CONFLICT = 0x10,
};
enum target_sc_flags_table {
TARGET_SCF_BIDI_OP = 0x01,
TARGET_SCF_ACK_KREF = 0x02,
};
/* fabric independent task management function values */
enum tcm_tmreq_table {
TMR_ABORT_TASK = 1,
TMR_ABORT_TASK_SET = 2,
TMR_CLEAR_ACA = 3,
TMR_CLEAR_TASK_SET = 4,
TMR_LUN_RESET = 5,
TMR_TARGET_WARM_RESET = 6,
TMR_TARGET_COLD_RESET = 7,
TMR_FABRIC_TMR = 255,
};
/* fabric independent task management response values */
enum tcm_tmrsp_table {
TMR_FUNCTION_COMPLETE = 0,
TMR_TASK_DOES_NOT_EXIST = 1,
TMR_LUN_DOES_NOT_EXIST = 2,
TMR_TASK_STILL_ALLEGIANT = 3,
TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
TMR_FUNCTION_REJECTED = 255,
};
struct se_obj {
atomic_t obj_access_count;
} ____cacheline_aligned;
};
/*
* Used by TCM Core internally to signal if ALUA emulation is enabled or
@ -207,7 +305,7 @@ struct t10_alua {
struct config_group alua_tg_pt_gps_group;
int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
struct list_head tg_pt_gps_list;
} ____cacheline_aligned;
};
struct t10_alua_lu_gp {
u16 lu_gp_id;
@ -218,7 +316,7 @@ struct t10_alua_lu_gp {
struct config_group lu_gp_group;
struct list_head lu_gp_node;
struct list_head lu_gp_mem_list;
} ____cacheline_aligned;
};
struct t10_alua_lu_gp_member {
bool lu_gp_assoc;
@ -227,7 +325,7 @@ struct t10_alua_lu_gp_member {
struct t10_alua_lu_gp *lu_gp;
struct se_device *lu_gp_mem_dev;
struct list_head lu_gp_mem_list;
} ____cacheline_aligned;
};
struct t10_alua_tg_pt_gp {
u16 tg_pt_gp_id;
@ -250,7 +348,7 @@ struct t10_alua_tg_pt_gp {
struct config_group tg_pt_gp_group;
struct list_head tg_pt_gp_list;
struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;
};
struct t10_alua_tg_pt_gp_member {
bool tg_pt_gp_assoc;
@ -259,7 +357,7 @@ struct t10_alua_tg_pt_gp_member {
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct se_port *tg_pt;
struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;
};
struct t10_vpd {
unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
@ -269,7 +367,7 @@ struct t10_vpd {
u32 association;
u32 device_identifier_type;
struct list_head vpd_list;
} ____cacheline_aligned;
};
struct t10_wwn {
char vendor[8];
@ -280,7 +378,7 @@ struct t10_wwn {
struct se_subsystem_dev *t10_sub_dev;
struct config_group t10_wwn_group;
struct list_head t10_vpd_list;
} ____cacheline_aligned;
};
/*
@ -333,7 +431,7 @@ struct t10_pr_registration {
struct list_head pr_reg_aptpl_list;
struct list_head pr_reg_atp_list;
struct list_head pr_reg_atp_mem_list;
} ____cacheline_aligned;
};
/*
* This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
@ -374,20 +472,20 @@ struct t10_reservation {
struct list_head registration_list;
struct list_head aptpl_reg_list;
struct t10_reservation_ops pr_ops;
} ____cacheline_aligned;
};
struct se_queue_req {
int state;
struct se_cmd *cmd;
struct list_head qr_list;
} ____cacheline_aligned;
};
struct se_queue_obj {
atomic_t queue_cnt;
spinlock_t cmd_queue_lock;
struct list_head qobj_list;
wait_queue_head_t thread_wq;
} ____cacheline_aligned;
};
struct se_task {
unsigned long long task_lba;
@ -397,16 +495,14 @@ struct se_task {
struct scatterlist *task_sg;
u32 task_sg_nents;
u16 task_flags;
u8 task_sense;
u8 task_scsi_status;
int task_error_status;
enum dma_data_direction task_data_direction;
atomic_t task_state_active;
struct list_head t_list;
struct list_head t_execute_list;
struct list_head t_state_list;
bool t_state_active;
struct completion task_stop_comp;
} ____cacheline_aligned;
};
struct se_cmd {
/* SAM response code being sent to initiator */
@ -451,6 +547,7 @@ struct se_cmd {
struct list_head se_queue_node;
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
struct kref cmd_kref;
struct target_core_fabric_ops *se_tfo;
int (*execute_task)(struct se_task *);
void (*transport_complete_callback)(struct se_cmd *);
@ -492,7 +589,7 @@ struct se_cmd {
struct list_head t_task_list;
u32 t_task_list_num;
} ____cacheline_aligned;
};
struct se_tmr_req {
/* Task Management function to be performed */
@ -510,7 +607,7 @@ struct se_tmr_req {
struct se_device *tmr_dev;
struct se_lun *tmr_lun;
struct list_head tmr_list;
} ____cacheline_aligned;
};
struct se_ua {
u8 ua_asc;
@ -518,7 +615,7 @@ struct se_ua {
struct se_node_acl *ua_nacl;
struct list_head ua_dev_list;
struct list_head ua_nacl_list;
} ____cacheline_aligned;
};
struct se_node_acl {
char initiatorname[TRANSPORT_IQN_LEN];
@ -545,7 +642,7 @@ struct se_node_acl {
struct config_group *acl_default_groups[5];
struct list_head acl_list;
struct list_head acl_sess_list;
} ____cacheline_aligned;
};
struct se_session {
unsigned sess_tearing_down:1;
@ -558,7 +655,7 @@ struct se_session {
struct list_head sess_cmd_list;
struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
} ____cacheline_aligned;
};
struct se_device;
struct se_transform_info;
@ -578,7 +675,7 @@ struct se_lun_acl {
struct list_head lacl_list;
struct config_group se_lun_group;
struct se_ml_stat_grps ml_stat_grps;
} ____cacheline_aligned;
};
struct se_dev_entry {
bool def_pr_registered;
@ -603,7 +700,7 @@ struct se_dev_entry {
struct se_lun *se_lun;
struct list_head alua_port_list;
struct list_head ua_list;
} ____cacheline_aligned;
};
struct se_dev_limits {
/* Max supported HW queue depth */
@ -612,7 +709,7 @@ struct se_dev_limits {
u32 queue_depth;
/* From include/linux/blkdev.h for the other HW/SW limits. */
struct queue_limits limits;
} ____cacheline_aligned;
};
struct se_dev_attrib {
int emulate_dpo;
@ -641,7 +738,7 @@ struct se_dev_attrib {
u32 unmap_granularity_alignment;
struct se_subsystem_dev *da_sub_dev;
struct config_group da_group;
} ____cacheline_aligned;
};
struct se_dev_stat_grps {
struct config_group stat_group;
@ -674,7 +771,7 @@ struct se_subsystem_dev {
struct config_group se_dev_pr_group;
/* For target_core_stat.c groups */
struct se_dev_stat_grps dev_stat_grps;
} ____cacheline_aligned;
};
struct se_device {
/* RELATIVE TARGET PORT IDENTIFIER Counter */
@ -685,7 +782,6 @@ struct se_device {
u32 dev_port_count;
/* See transport_device_status_table */
u32 dev_status;
u32 dev_tcq_window_closed;
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
@ -702,7 +798,6 @@ struct se_device {
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
atomic_t execute_tasks;
atomic_t dev_ordered_sync;
@ -740,7 +835,7 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
} ____cacheline_aligned;
};
struct se_hba {
u16 hba_tpgt;
@ -759,7 +854,7 @@ struct se_hba {
struct config_group hba_group;
struct mutex hba_access_mutex;
struct se_subsystem_api *transport;
} ____cacheline_aligned;
};
struct se_port_stat_grps {
struct config_group stat_group;
@ -785,13 +880,13 @@ struct se_lun {
struct se_port *lun_sep;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
} ____cacheline_aligned;
};
struct scsi_port_stats {
u64 cmd_pdus;
u64 tx_data_octets;
u64 rx_data_octets;
} ____cacheline_aligned;
};
struct se_port {
/* RELATIVE TARGET PORT IDENTIFIER */
@ -811,12 +906,12 @@ struct se_port {
struct se_portal_group *sep_tpg;
struct list_head sep_alua_list;
struct list_head sep_list;
} ____cacheline_aligned;
};
struct se_tpg_np {
struct se_portal_group *tpg_np_parent;
struct config_group tpg_np_group;
} ____cacheline_aligned;
};
struct se_portal_group {
/* Type of target portal group, see transport_tpg_type_table */
@ -849,13 +944,13 @@ struct se_portal_group {
struct config_group tpg_acl_group;
struct config_group tpg_attrib_group;
struct config_group tpg_param_group;
} ____cacheline_aligned;
};
struct se_wwn {
struct target_fabric_configfs *wwn_tf;
struct config_group wwn_group;
struct config_group *wwn_default_groups[2];
struct config_group fabric_stat_group;
} ____cacheline_aligned;
};
#endif /* TARGET_CORE_BASE_H */

View File

@ -1,63 +0,0 @@
#ifndef TARGET_CORE_DEVICE_H
#define TARGET_CORE_DEVICE_H
extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
extern struct se_dev_entry *core_get_se_deve_from_rtpi(
struct se_node_acl *, u16);
extern int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
u32, struct se_node_acl *,
struct se_portal_group *, int);
extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
extern int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
extern int target_report_luns(struct se_task *);
extern void se_release_device_for_hba(struct se_device *);
extern void se_release_vpd_for_dev(struct se_device *);
extern void se_clear_dev_ports(struct se_device *);
extern int se_free_virtual_device(struct se_device *, struct se_hba *);
extern int se_dev_check_online(struct se_device *);
extern int se_dev_check_shutdown(struct se_device *);
extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
extern int se_dev_set_task_timeout(struct se_device *, u32);
extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
extern int se_dev_set_unmap_granularity(struct se_device *, u32);
extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
extern int se_dev_set_emulate_dpo(struct se_device *, int);
extern int se_dev_set_emulate_fua_write(struct se_device *, int);
extern int se_dev_set_emulate_fua_read(struct se_device *, int);
extern int se_dev_set_emulate_write_cache(struct se_device *, int);
extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
extern int se_dev_set_emulate_tas(struct se_device *, int);
extern int se_dev_set_emulate_tpu(struct se_device *, int);
extern int se_dev_set_emulate_tpws(struct se_device *, int);
extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
extern int se_dev_set_is_nonrot(struct se_device *, int);
extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
extern int se_dev_set_queue_depth(struct se_device *, u32);
extern int se_dev_set_max_sectors(struct se_device *, u32);
extern int se_dev_set_optimal_sectors(struct se_device *, u32);
extern int se_dev_set_block_size(struct se_device *, u32);
extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
struct se_device *, u32);
extern int core_dev_del_lun(struct se_portal_group *, u32);
extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
u32, char *, int *);
extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun *, struct se_lun_acl *);
extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
extern int core_dev_setup_virtual_lun0(void);
extern void core_dev_release_virtual_lun0(void);
#endif /* TARGET_CORE_DEVICE_H */

View File

@ -1,5 +1,5 @@
/* Defined in target_core_configfs.h */
struct target_fabric_configfs;
#ifndef TARGET_CORE_FABRIC_H
#define TARGET_CORE_FABRIC_H
struct target_core_fabric_ops {
struct configfs_subsystem *tf_subsys;
@ -52,10 +52,6 @@ struct target_core_fabric_ops {
* Returning 0 will signal a descriptor has not been released.
*/
int (*check_stop_free)(struct se_cmd *);
/*
* Optional check for active I/O shutdown
*/
int (*check_release_cmd)(struct se_cmd *);
void (*release_cmd)(struct se_cmd *);
/*
* Called with spin_lock_bh(struct se_portal_group->session_lock) held.
@ -103,3 +99,89 @@ struct target_core_fabric_ops {
struct config_group *, const char *);
void (*fabric_drop_nodeacl)(struct se_node_acl *);
};
struct se_session *transport_init_session(void);
void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_free_session(struct se_session *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *);
int transport_lookup_cmd_lun(struct se_cmd *, u32);
int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
unsigned char *, u32, u32, int, int, int);
int transport_handle_cdb_direct(struct se_cmd *);
int transport_generic_handle_cdb_map(struct se_cmd *);
int transport_generic_handle_data(struct se_cmd *);
int transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
struct scatterlist *, u32, struct scatterlist *, u32);
void transport_do_task_sg_chain(struct se_cmd *);
int transport_generic_new_cmd(struct se_cmd *);
void transport_generic_process_write(struct se_cmd *);
void transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
void target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
int target_put_sess_cmd(struct se_session *, struct se_cmd *);
void target_splice_sess_cmd_list(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *, int);
int core_alua_check_nonop_delay(struct se_cmd *);
struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u32);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
unsigned char *);
void core_tpg_clear_object_luns(struct se_portal_group *);
struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
struct se_node_acl *, const char *, u32);
int core_tpg_del_initiator_node_acl(struct se_portal_group *,
struct se_node_acl *, int);
int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
struct se_portal_group *, void *, int);
int core_tpg_deregister(struct se_portal_group *);
/* SAS helpers */
u8 sas_get_fabric_proto_ident(struct se_portal_group *);
u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
char *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
u32 *, char **);
/* FC helpers */
u8 fc_get_fabric_proto_ident(struct se_portal_group *);
u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
char *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
u32 *, char **);
/* iSCSI helpers */
u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
u32 *, char **);
#endif /* TARGET_CORE_FABRIC_H */
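After the header consolidation a fabric module only needs target_core_base.h and target_core_fabric.h. A pared-down sketch of what that looks like for a hypothetical "myfab" fabric follows (only two callbacks shown; the remaining mandatory ops and the configfs wiring are omitted):
#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
/* Hypothetical per-command descriptor embedding the se_cmd */
struct myfab_cmd {
        struct se_cmd se_cmd;
};
static int myfab_check_stop_free(struct se_cmd *se_cmd)
{
        /* drop the cmd_kref reference from the response path, as tcm_fc does */
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
static void myfab_release_cmd(struct se_cmd *se_cmd)
{
        struct myfab_cmd *cmd = container_of(se_cmd, struct myfab_cmd, se_cmd);
        kfree(cmd);
}
static struct target_core_fabric_ops myfab_ops = {
        .check_stop_free        = myfab_check_stop_free,
        .release_cmd            = myfab_release_cmd,
        /* remaining callbacks omitted for brevity */
};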

View File

@ -1,28 +0,0 @@
#ifndef TARGET_CORE_FABRIC_LIB_H
#define TARGET_CORE_FABRIC_LIB_H
extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
const char *, u32 *, char **);
extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
const char *, u32 *, char **);
extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
const char *, u32 *, char **);
#endif /* TARGET_CORE_FABRIC_LIB_H */

View File

@ -1,35 +0,0 @@
#ifndef TARGET_CORE_TMR_H
#define TARGET_CORE_TMR_H
/* fabric independent task management function values */
enum tcm_tmreq_table {
TMR_ABORT_TASK = 1,
TMR_ABORT_TASK_SET = 2,
TMR_CLEAR_ACA = 3,
TMR_CLEAR_TASK_SET = 4,
TMR_LUN_RESET = 5,
TMR_TARGET_WARM_RESET = 6,
TMR_TARGET_COLD_RESET = 7,
TMR_FABRIC_TMR = 255,
};
/* fabric independent task management response values */
enum tcm_tmrsp_table {
TMR_FUNCTION_COMPLETE = 0,
TMR_TASK_DOES_NOT_EXIST = 1,
TMR_LUN_DOES_NOT_EXIST = 2,
TMR_TASK_STILL_ALLEGIANT = 3,
TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
TMR_FUNCTION_REJECTED = 255,
};
extern struct kmem_cache *se_tmr_req_cache;
extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
extern void core_tmr_release_req(struct se_tmr_req *);
extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
#endif /* TARGET_CORE_TMR_H */

View File

@ -1,35 +0,0 @@
#ifndef TARGET_CORE_TPG_H
#define TARGET_CORE_TPG_H
extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
extern void core_tpg_add_node_to_devs(struct se_node_acl *,
struct se_portal_group *);
extern struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *,
unsigned char *);
extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
extern void core_tpg_clear_object_luns(struct se_portal_group *);
extern struct se_node_acl *core_tpg_add_initiator_node_acl(
struct se_portal_group *,
struct se_node_acl *,
const char *, u32);
extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
struct se_node_acl *, int);
extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
extern int core_tpg_register(struct target_core_fabric_ops *,
struct se_wwn *,
struct se_portal_group *, void *,
int);
extern int core_tpg_deregister(struct se_portal_group *);
extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
void *);
extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
#endif /* TARGET_CORE_TPG_H */
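
These TPG registration entry points are typically driven from a fabric's configfs ->fabric_make_tpg() / ->fabric_drop_tpg() callbacks. A minimal sketch follows, assuming a fabric-private TPG container and an already populated ops table; struct example_tpg and example_fabric_ops are placeholders.

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric-private TPG wrapper embedding the core TPG. */
struct example_tpg {
	struct se_portal_group se_tpg;
	/* fabric-private per-TPG state would follow */
};

extern struct target_core_fabric_ops example_fabric_ops;	/* placeholder */

static int example_register_tpg(struct example_tpg *tpg, struct se_wwn *wwn)
{
	/* Register a normal (non-discovery) TPG with the core. */
	int ret = core_tpg_register(&example_fabric_ops, wwn, &tpg->se_tpg,
				    tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		return ret;

	/* Teardown later mirrors this via core_tpg_deregister(&tpg->se_tpg). */
	return 0;
}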

@@ -1,287 +0,0 @@
#ifndef TARGET_CORE_TRANSPORT_H
#define TARGET_CORE_TRANSPORT_H
#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
/*
* struct se_subsystem_dev->su_dev_flags
*/
#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
#define SDF_USING_UDEV_PATH 0x00000004
#define SDF_USING_ALIAS 0x00000008
/*
* struct se_device->dev_flags
*/
#define DF_READ_ONLY 0x00000001
#define DF_SPC2_RESERVATIONS 0x00000002
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
/* struct se_dev_attrib sanity values */
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT 0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Emulation for Direct Page Out */
#define DA_EMULATE_DPO 0
/* Emulation for Forced Unit Access WRITEs */
#define DA_EMULATE_FUA_WRITE 1
/* Emulation for Forced Unit Access READs */
#define DA_EMULATE_FUA_READ 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL 0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS 1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU 0
/*
* Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
* block/blk-lib.c:blkdev_issue_discard()
*/
#define DA_EMULATE_TPWS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_RESERVATIONS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
/* By default don't report non-rotating (solid state) medium */
#define DA_IS_NONROT 0
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
#define DA_EMULATE_REST_REORD 0
#define SE_MODE_PAGE_BUF 512
#define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs))
struct se_subsystem_api;
extern int init_se_kmem_caches(void);
extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
extern void transport_subsystem_check_init(void);
extern int transport_subsystem_register(struct se_subsystem_api *);
extern void transport_subsystem_release(struct se_subsystem_api *);
extern void transport_load_plugins(void);
extern struct se_session *transport_init_session(void);
extern void __transport_register_session(struct se_portal_group *,
struct se_node_acl *,
struct se_session *, void *);
extern void transport_register_session(struct se_portal_group *,
struct se_node_acl *,
struct se_session *, void *);
extern void transport_free_session(struct se_session *);
extern void transport_deregister_session_configfs(struct se_session *);
extern void transport_deregister_session(struct se_session *);
extern void transport_cmd_finish_abort(struct se_cmd *, int);
extern void transport_complete_sync_cache(struct se_cmd *, int);
extern void transport_complete_task(struct se_task *, int);
extern void transport_add_task_to_execute_queue(struct se_task *,
struct se_task *,
struct se_device *);
extern void transport_remove_task_from_execute_queue(struct se_task *,
struct se_device *);
extern void __transport_remove_task_from_execute_queue(struct se_task *,
struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
extern void transport_dump_dev_state(struct se_device *, char *, int *);
extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
unsigned long long, char *, int *);
extern void transport_dump_vpd_proto_id(struct t10_vpd *,
unsigned char *, int);
extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_assoc(struct t10_vpd *,
unsigned char *, int);
extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_ident_type(struct t10_vpd *,
unsigned char *, int);
extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_ident(struct t10_vpd *,
unsigned char *, int);
extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
struct se_subsystem_api *,
struct se_subsystem_dev *, u32,
void *, struct se_dev_limits *,
const char *, const char *);
extern void transport_init_se_cmd(struct se_cmd *,
struct target_core_fabric_ops *,
struct se_session *, u32, int, int,
unsigned char *);
void *transport_kmap_first_data_page(struct se_cmd *cmd);
void transport_kunmap_first_data_page(struct se_cmd *cmd);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
extern bool transport_wait_for_tasks(struct se_cmd *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int);
extern void target_get_sess_cmd(struct se_session *, struct se_cmd *);
extern int target_put_sess_cmd(struct se_session *, struct se_cmd *);
extern void target_splice_sess_cmd_list(struct se_session *);
extern void target_wait_for_sess_cmds(struct se_session *, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
extern int transport_generic_new_cmd(struct se_cmd *);
extern int transport_generic_do_tmr(struct se_cmd *);
/* From target_core_alua.c */
extern int core_alua_check_nonop_delay(struct se_cmd *);
/* From target_core_cdb.c */
extern int transport_emulate_control_cdb(struct se_task *);
extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
/*
* Each se_transport_task_t can have N number of possible struct se_task's
* for the storage transport(s) to possibly execute.
* Used primarily for splitting up CDBs that exceed the physical storage
* HBA's maximum sector count per task.
*/
struct se_mem {
struct page *se_page;
u32 se_len;
u32 se_off;
struct list_head se_list;
} ____cacheline_aligned;
/*
* Each type of disk transport supported MUST have a template defined
* within its .h file.
*/
struct se_subsystem_api {
/*
* The Name. :-)
*/
char name[16];
/*
* Transport Type.
*/
u8 transport_type;
unsigned int fua_write_emulated : 1;
unsigned int write_cache_emulated : 1;
/*
* struct module for struct se_hba references
*/
struct module *owner;
/*
* Used for global se_subsystem_api list_head
*/
struct list_head sub_api_list;
/*
* attach_hba():
*/
int (*attach_hba)(struct se_hba *, u32);
/*
* detach_hba():
*/
void (*detach_hba)(struct se_hba *);
/*
* pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
* Linux/SCSI struct Scsi_Host passthrough
*/
int (*pmode_enable_hba)(struct se_hba *, unsigned long);
/*
* allocate_virtdevice():
*/
void *(*allocate_virtdevice)(struct se_hba *, const char *);
/*
* create_virtdevice(): Only for Virtual HBAs
*/
struct se_device *(*create_virtdevice)(struct se_hba *,
struct se_subsystem_dev *, void *);
/*
* free_device():
*/
void (*free_device)(void *);
/*
* transport_complete():
*
* Use transport_generic_complete() for majority of DAS transport
* drivers. Provided out of convenience.
*/
int (*transport_complete)(struct se_task *task);
struct se_task *(*alloc_task)(unsigned char *cdb);
/*
* do_task():
*/
int (*do_task)(struct se_task *);
/*
* Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
* UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
*/
int (*do_discard)(struct se_device *, sector_t, u32);
/*
* Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
* SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
*/
void (*do_sync_cache)(struct se_task *);
/*
* free_task():
*/
void (*free_task)(struct se_task *);
/*
* check_configfs_dev_params():
*/
ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
/*
* set_configfs_dev_params():
*/
ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
const char *, ssize_t);
/*
* show_configfs_dev_params():
*/
ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
char *);
/*
* get_device_rev():
*/
u32 (*get_device_rev)(struct se_device *);
/*
* get_device_type():
*/
u32 (*get_device_type)(struct se_device *);
/*
* Get the sector_t from a subsystem backstore..
*/
sector_t (*get_blocks)(struct se_device *);
/*
* get_sense_buffer():
*/
unsigned char *(*get_sense_buffer)(struct se_task *);
} ____cacheline_aligned;
#endif /* TARGET_CORE_TRANSPORT_H */
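
Taken together, a storage backend of this era implemented the callbacks above and registered or released its se_subsystem_api at module load and unload. The skeleton below is a sketch against the header shown above (which this series removes), with only the registration plumbing filled in and every real callback left as a comment.

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>	/* the (pre-reshuffle) header above */

/*
 * Sketch only: a virtual-device backend descriptor.  A real plugin must
 * also wire up attach_hba/detach_hba, the virtdevice allocation hooks,
 * alloc_task/do_task/free_task, the configfs dev_params handlers and the
 * get_device_rev/get_device_type/get_blocks accessors declared above.
 */
static struct se_subsystem_api example_backend_ops = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.transport_type	= TRANSPORT_PLUGIN_VHBA_VDEV,
};

static int __init example_backend_init(void)
{
	return transport_subsystem_register(&example_backend_ops);
}

static void __exit example_backend_exit(void)
{
	transport_subsystem_release(&example_backend_ops);
}

module_init(example_backend_init);
module_exit(example_backend_exit);
MODULE_LICENSE("GPL");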