Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (28 commits)
  [SCSI] qla4xxx: fix compilation warning
  [SCSI] make error handling more robust in the face of reservations
  [SCSI] tgt: fix warning
  [SCSI] drivers/message/fusion: Adjust confusing if indentation
  [SCSI] Return NEEDS_RETRY for eh commands with status BUSY
  [SCSI] ibmvfc: Driver version 1.0.9
  [SCSI] ibmvfc: Fix terminate_rport_io
  [SCSI] ibmvfc: Fix rport add/delete race resulting in oops
  [SCSI] lpfc 8.3.16: Change LPFC driver version to 8.3.16
  [SCSI] lpfc 8.3.16: FCoE Discovery and Failover Fixes
  [SCSI] lpfc 8.3.16: SLI Additions, updates, and code cleanup
  [SCSI] pm8001: introduce missing kfree
  [SCSI] qla4xxx: Update driver version to 5.02.00-k3
  [SCSI] qla4xxx: Added AER support for ISP82xx
  [SCSI] qla4xxx: Handle outstanding mbx cmds on hung f/w scenarios
  [SCSI] qla4xxx: updated mbx_sys_info struct to sync with FW 4.6.x
  [SCSI] qla4xxx: clear AF_DPC_SCHEDULED flage when exit from do_dpc
  [SCSI] qla4xxx: Stop firmware before doing init firmware.
  [SCSI] qla4xxx: Use the correct request queue.
  [SCSI] qla4xxx: set correct value in sess->recovery_tmo
  ...
Linus Torvalds 2010-08-14 12:34:34 -07:00
commit c29c08b598
41 changed files with 1546 additions and 395 deletions


@ -122,14 +122,6 @@ config ISCSI_IBFT_FIND
is necessary for iSCSI Boot Firmware Table Attributes module to work
properly.
config ISCSI_BOOT_SYSFS
tristate "iSCSI Boot Sysfs Interface"
default n
help
This option enables support for exposing iSCSI boot information
via sysfs to userspace. If you wish to export this information,
say Y. Otherwise, say N.
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
select ISCSI_BOOT_SYSFS


@ -10,5 +10,4 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o
obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o


@ -370,6 +370,14 @@ config ISCSI_TCP
http://open-iscsi.org
config ISCSI_BOOT_SYSFS
tristate "iSCSI Boot Sysfs Interface"
default n
help
This option enables support for exposing iSCSI boot information
via sysfs to userspace. If you wish to export this information,
say Y. Otherwise, say N.
source "drivers/scsi/cxgb3i/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
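The help text above describes only the sysfs side; the export itself is done by a driver that registers kobjects through the iscsi_boot_sysfs API. The sketch below is illustrative and assumes only the calls visible in the be2iscsi portion of this diff (iscsi_boot_create_host_kset, iscsi_boot_create_target, iscsi_boot_destroy_kset); the demo_* names and stub contents are not part of this patch.

#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/iscsi_boot_sysfs.h>
#include <scsi/scsi_host.h>

/* Illustrative show callback: report only the boot target name. */
static ssize_t demo_show_tgt(void *data, int type, char *buf)
{
	if (type == ISCSI_BOOT_TGT_NAME)
		return sprintf(buf, "iqn.2010-08.org.example:boot-lun\n");
	return -ENOSYS;
}

/* Only the target-name attribute is made visible in this sketch. */
static mode_t demo_tgt_visible(void *data, int type)
{
	return (type == ISCSI_BOOT_TGT_NAME) ? S_IRUGO : 0;
}

static struct iscsi_boot_kset *demo_register_boot_info(struct Scsi_Host *shost,
							void *drv_data)
{
	struct iscsi_boot_kset *kset;

	kset = iscsi_boot_create_host_kset(shost->host_no);
	if (!kset)
		return NULL;

	/* One kobject per boot target index; be2iscsi below uses index 0. */
	if (!iscsi_boot_create_target(kset, 0, drv_data,
				      demo_show_tgt, demo_tgt_visible)) {
		iscsi_boot_destroy_kset(kset);
		return NULL;
	}
	return kset;
}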


@ -42,6 +42,7 @@ obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o


@ -2,6 +2,7 @@ config BE2ISCSI
tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
select ISCSI_BOOT_SYSFS
help
This driver implements the iSCSI functionality for ServerEngines'


@ -162,6 +162,13 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2
#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
#define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN 14
#define OPCODE_COMMON_ISCSI_NTWK_CONFIGURE_STATELESS_IP_ADDR 17
#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR 21
#define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY 22
#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23
#define OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID 24
#define OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO 25
#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
@ -237,11 +244,109 @@ struct be_cmd_resp_eq_create {
u16 rsvd0; /* sword */
} __packed;
struct mgmt_chap_format {
u32 flags;
u8 intr_chap_name[256];
u8 intr_secret[16];
u8 target_chap_name[256];
u8 target_secret[16];
u16 intr_chap_name_length;
u16 intr_secret_length;
u16 target_chap_name_length;
u16 target_secret_length;
} __packed;
struct mgmt_auth_method_format {
u8 auth_method_type;
u8 padding[3];
struct mgmt_chap_format chap;
} __packed;
struct mgmt_conn_login_options {
u8 flags;
u8 header_digest;
u8 data_digest;
u8 rsvd0;
u32 max_recv_datasegment_len_ini;
u32 max_recv_datasegment_len_tgt;
u32 tcp_mss;
u32 tcp_window_size;
struct mgmt_auth_method_format auth_data;
} __packed;
struct ip_address_format {
u16 size_of_structure;
u8 reserved;
u8 ip_type;
u8 ip_address[16];
u32 rsvd0;
} __packed;
struct mgmt_conn_info {
u32 connection_handle;
u32 connection_status;
u16 src_port;
u16 dest_port;
u16 dest_port_redirected;
u16 cid;
u32 estimated_throughput;
struct ip_address_format src_ipaddr;
struct ip_address_format dest_ipaddr;
struct ip_address_format dest_ipaddr_redirected;
struct mgmt_conn_login_options negotiated_login_options;
} __packed;
struct mgmt_session_login_options {
u8 flags;
u8 error_recovery_level;
u16 rsvd0;
u32 first_burst_length;
u32 max_burst_length;
u16 max_connections;
u16 max_outstanding_r2t;
u16 default_time2wait;
u16 default_time2retain;
} __packed;
struct mgmt_session_info {
u32 session_handle;
u32 status;
u8 isid[6];
u16 tsih;
u32 session_flags;
u16 conn_count;
u16 pad;
u8 target_name[224];
u8 initiator_iscsiname[224];
struct mgmt_session_login_options negotiated_login_options;
struct mgmt_conn_info conn_list[1];
} __packed;
struct be_cmd_req_get_session {
struct be_cmd_req_hdr hdr;
u32 session_handle;
} __packed;
struct be_cmd_resp_get_session {
struct be_cmd_resp_hdr hdr;
struct mgmt_session_info session_info;
} __packed;
struct mac_addr {
u16 size_of_struct;
u8 addr[ETH_ALEN];
} __packed;
struct be_cmd_req_get_boot_target {
struct be_cmd_req_hdr hdr;
} __packed;
struct be_cmd_resp_get_boot_target {
struct be_cmd_resp_hdr hdr;
u32 boot_session_count;
int boot_session_handle;
};
struct be_cmd_req_mac_query {
struct be_cmd_req_hdr hdr;
u8 type;
@ -426,6 +531,11 @@ int be_poll_mcc(struct be_ctrl_info *ctrl);
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba);
unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
u32 boot_session_handle,
struct be_dma_mem *nonemb_cmd);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
/*ISCSI Functuions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
@ -601,14 +711,6 @@ struct be_eq_delay_params_in {
struct eq_delay delay[8];
} __packed;
struct ip_address_format {
u16 size_of_structure;
u8 reserved;
u8 ip_type;
u8 ip_address[16];
u32 rsvd0;
} __packed;
struct tcp_connect_and_offload_in {
struct be_cmd_req_hdr hdr;
struct ip_address_format ip_address;
@ -688,18 +790,29 @@ struct be_fw_cfg {
u32 function_caps;
} __packed;
#define CMD_ISCSI_COMMAND_INVALIDATE 1
struct be_all_if_id {
struct be_cmd_req_hdr hdr;
u32 if_count;
u32 if_hndl_list[1];
} __packed;
#define ISCSI_OPCODE_SCSI_DATA_OUT 5
#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
#define OPCODE_COMMON_ISCSI_CLEANUP 59
#define OPCODE_COMMON_TCP_UPLOAD 56
#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70
#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52
/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
#define CMD_ISCSI_COMMAND_INVALIDATE 1
#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001
#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
#define INI_WR_CMD 1 /* Initiator write command */
#define INI_TMF_CMD 2 /* Initiator TMF command */
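The ip_address_format structure added above stores IPv4 and IPv6 targets in the same 16-byte buffer and distinguishes them with ip_type; the boot-sysfs code later in this patch prints the field with %pI4 when ip_type is 0x1 and with %pI6 otherwise. A small self-contained userspace sketch of that convention (the sample address and helper name are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down copy of the layout shown above (packed in the driver). */
struct ip_address_format {
	uint16_t size_of_structure;
	uint8_t  reserved;
	uint8_t  ip_type;	/* 0x1 is treated as IPv4 by the show path */
	uint8_t  ip_address[16];
	uint32_t rsvd0;
};

/* Format the destination address the way beiscsi_show_boot_tgt_info does:
 * ip_type 0x1 -> dotted IPv4 from the first four bytes, otherwise IPv6. */
static void print_boot_target_addr(const struct ip_address_format *ip)
{
	if (ip->ip_type == 0x1)
		printf("%u.%u.%u.%u\n", ip->ip_address[0], ip->ip_address[1],
		       ip->ip_address[2], ip->ip_address[3]);
	else
		printf("IPv6 target address (16 raw bytes)\n");
}

int main(void)
{
	struct ip_address_format ip = { .ip_type = 0x1,
					.ip_address = { 192, 168, 0, 10 } };
	print_boot_target_addr(&ip);
	return 0;
}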


@ -300,20 +300,41 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
struct be_cmd_resp_get_mac_addr *resp;
struct be_mcc_wrb *wrb;
unsigned int tag, wrb_num;
int len = 0;
unsigned short status, extd_status;
int status;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
status = beiscsi_get_macaddr(buf, phba);
if (status < 0) {
SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
return status;
}
break;
default:
return iscsi_host_get_param(shost, param, buf);
}
return len;
}
int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
{
struct be_cmd_resp_get_mac_addr *resp;
struct be_mcc_wrb *wrb;
unsigned int tag, wrb_num;
unsigned short status, extd_status;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
int rc;
if (phba->read_mac_address)
return sysfs_format_mac(buf, phba->mac_address,
ETH_ALEN);
tag = be_cmd_get_mac_addr(phba);
if (!tag) {
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
return -EAGAIN;
return -EBUSY;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
@ -322,26 +343,23 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
SE_DEBUG(DBG_LVL_1, "Failed to get be_cmd_get_mac_addr"
" status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -EAGAIN;
} else {
}
wrb = queue_get_wrb(mccq, wrb_num);
free_mcc_tag(&phba->ctrl, tag);
resp = embedded_payload(wrb);
memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
len = sysfs_format_mac(buf, phba->mac_address,
rc = sysfs_format_mac(buf, phba->mac_address,
ETH_ALEN);
}
phba->read_mac_address = 1;
break;
return rc;
default:
return iscsi_host_get_param(shost, param, buf);
}
return len;
}

/**
 * beiscsi_conn_get_stats - get the iscsi stats
 * @cls_conn: pointer to iscsi cls conn


@ -54,6 +54,8 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba);
int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen);


@ -26,6 +26,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
@ -211,6 +212,218 @@ unlock:
return rc;
}
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
char *str = buf;
int rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
rc = sprintf(buf, "%.*s\n",
(int)strlen(phba->boot_sess.target_name),
(char *)&phba->boot_sess.target_name);
break;
case ISCSI_BOOT_TGT_IP_ADDR:
if (phba->boot_sess.conn_list[0].dest_ipaddr.ip_type == 0x1)
rc = sprintf(buf, "%pI4\n",
(char *)&phba->boot_sess.conn_list[0].
dest_ipaddr.ip_address);
else
rc = sprintf(str, "%pI6\n",
(char *)&phba->boot_sess.conn_list[0].
dest_ipaddr.ip_address);
break;
case ISCSI_BOOT_TGT_PORT:
rc = sprintf(str, "%d\n", phba->boot_sess.conn_list[0].
dest_port);
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
rc = sprintf(str, "%.*s\n",
phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
target_chap_name_length,
(char *)&phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
target_chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
rc = sprintf(str, "%.*s\n",
phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
target_secret_length,
(char *)&phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
target_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
rc = sprintf(str, "%.*s\n",
phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
intr_chap_name_length,
(char *)&phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
intr_chap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
rc = sprintf(str, "%.*s\n",
phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
intr_secret_length,
(char *)&phba->boot_sess.conn_list[0].
negotiated_login_options.auth_data.chap.
intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
rc = sprintf(str, "2\n");
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
break;
default:
rc = -ENOSYS;
break;
}
return rc;
}
static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
char *str = buf;
int rc;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
break;
default:
rc = -ENOSYS;
break;
}
return rc;
}
static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
char *str = buf;
int rc;
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
rc = sprintf(str, "2\n");
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
break;
case ISCSI_BOOT_ETH_MAC:
rc = beiscsi_get_macaddr(buf, phba);
if (rc < 0) {
SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
return rc;
}
break;
default:
rc = -ENOSYS;
break;
}
return rc;
}
static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
int rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
case ISCSI_BOOT_TGT_IP_ADDR:
case ISCSI_BOOT_TGT_PORT:
case ISCSI_BOOT_TGT_CHAP_NAME:
case ISCSI_BOOT_TGT_CHAP_SECRET:
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
case ISCSI_BOOT_TGT_NIC_ASSOC:
case ISCSI_BOOT_TGT_FLAGS:
rc = S_IRUGO;
break;
default:
rc = 0;
break;
}
return rc;
}
static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
int rc;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
rc = S_IRUGO;
break;
default:
rc = 0;
break;
}
return rc;
}
static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
int rc;
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
case ISCSI_BOOT_ETH_MAC:
case ISCSI_BOOT_ETH_INDEX:
rc = S_IRUGO;
break;
default:
rc = 0;
break;
}
return rc;
}
static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
{
struct iscsi_boot_kobj *boot_kobj;
phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
if (!phba->boot_kset)
return -ENOMEM;
/* get boot info using mgmt cmd */
boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
beiscsi_show_boot_tgt_info,
beiscsi_tgt_get_attr_visibility);
if (!boot_kobj)
goto free_kset;
boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
beiscsi_show_boot_ini_info,
beiscsi_ini_get_attr_visibility);
if (!boot_kobj)
goto free_kset;
boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
beiscsi_show_boot_eth_info,
beiscsi_eth_get_attr_visibility);
if (!boot_kobj)
goto free_kset;
return 0;
free_kset:
iscsi_boot_destroy_kset(phba->boot_kset);
return -ENOMEM;
}
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@ -268,6 +481,15 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
if (iscsi_host_add(shost, &phba->pcidev->dev))
goto free_devices;
if (beiscsi_setup_boot_info(phba))
/*
* log error but continue, because we may not be using
* iscsi boot.
*/
shost_printk(KERN_ERR, phba->shost, "Could not set up "
"iSCSI boot info.");
return phba;
free_devices:
@ -3279,6 +3501,89 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
"In hwi_disable_intr, Already Disabled\n");
}
static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
{
struct be_cmd_resp_get_boot_target *boot_resp;
struct be_cmd_resp_get_session *session_resp;
struct be_mcc_wrb *wrb;
struct be_dma_mem nonemb_cmd;
unsigned int tag, wrb_num;
unsigned short status, extd_status;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
tag = beiscsi_get_boot_target(phba);
if (!tag) {
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
return -EAGAIN;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
" status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -EBUSY;
}
wrb = queue_get_wrb(mccq, wrb_num);
free_mcc_tag(&phba->ctrl, tag);
boot_resp = embedded_payload(wrb);
if (boot_resp->boot_session_handle < 0) {
printk(KERN_ERR "No Boot Session for this pci_func,"
"session Hndl = %d\n", boot_resp->boot_session_handle);
return -ENXIO;
}
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
sizeof(*session_resp),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for"
"beiscsi_get_session_info\n");
return -ENOMEM;
}
memset(nonemb_cmd.va, 0, sizeof(*session_resp));
tag = beiscsi_get_session_info(phba,
boot_resp->boot_session_handle, &nonemb_cmd);
if (!tag) {
SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
" Failed\n");
goto boot_freemem;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
" status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
goto boot_freemem;
}
wrb = queue_get_wrb(mccq, wrb_num);
free_mcc_tag(&phba->ctrl, tag);
session_resp = nonemb_cmd.va ;
memcpy(&phba->boot_sess, &session_resp->session_info,
sizeof(struct mgmt_session_info));
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
boot_freemem:
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -ENOMEM;
}
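beiscsi_get_boot_info() above and beiscsi_get_macaddr() earlier both decode phba->ctrl.mcc_numtag[tag] with the same masks, so the completion word evidently packs the WRB index in bits 23:16, the extended status in bits 15:8 and the status in bits 7:0. A self-contained sketch of that decoding (helper and struct names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Decode an MCC completion word the way the be2iscsi code above does. */
struct mcc_completion {
	uint8_t wrb_num;	/* (word & 0x00FF0000) >> 16 */
	uint8_t extd_status;	/* (word & 0x0000FF00) >> 8  */
	uint8_t status;		/*  word & 0x000000FF        */
};

static struct mcc_completion decode_mcc_numtag(uint32_t word)
{
	struct mcc_completion c = {
		.wrb_num     = (word & 0x00FF0000) >> 16,
		.extd_status = (word & 0x0000FF00) >> 8,
		.status      =  word & 0x000000FF,
	};
	return c;
}

int main(void)
{
	struct mcc_completion c = decode_mcc_numtag(0x00050000);

	/* status and extd_status both zero means the command completed cleanly. */
	printf("wrb=%u extd=%u status=%u\n", c.wrb_num, c.extd_status, c.status);
	return 0;
}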
static int beiscsi_init_port(struct beiscsi_hba *phba)
{
int ret;
@ -3841,6 +4146,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
iscsi_boot_destroy_kset(phba->boot_kset);
}
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
@ -3996,6 +4302,11 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_blkenbld;
}
hwi_enable_intr(phba);
ret = beiscsi_get_boot_info(phba);
if (ret < 0) {
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
"No Boot Devices !!!!!\n");
}
SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
return 0;


@ -35,7 +35,7 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
#define BUILD_STR "2.0.527.0"
#define BUILD_STR "2.0.549.0"
#define BE_NAME "ServerEngines BladeEngine2" \
"Linux iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@ -63,7 +63,7 @@
#define BEISCSI_SGLIST_ELEMENTS 30
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
#define BEISCSI_MAX_SECTORS 256 /* scsi_host->max_sectors */
#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@ -312,6 +312,7 @@ struct beiscsi_hba {
struct list_head hba_queue;
unsigned short *cid_array;
struct iscsi_endpoint **ep_array;
struct iscsi_boot_kset *boot_kset;
struct Scsi_Host *shost;
struct {
/**
@ -342,6 +343,8 @@ struct beiscsi_hba {
struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
unsigned int read_mac_address;
struct mgmt_session_info boot_sess;
struct invalidate_command_table inv_tbl[128];
};


@ -20,6 +20,77 @@
#include "be_mgmt.h"
#include "be_iscsi.h"
#include <scsi/scsi_transport_iscsi.h>
unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_addr *req;
unsigned int tag = 0;
SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
sizeof(*req));
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
u32 boot_session_handle,
struct be_dma_mem *nonemb_cmd)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
unsigned int tag = 0;
struct be_cmd_req_get_session *req;
struct be_cmd_resp_get_session *resp;
struct be_sge *sge;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
nonemb_cmd->size = sizeof(*resp);
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
wrb = wrb_from_mccq(phba);
sge = nonembedded_sgl(wrb);
wrb->tag0 |= tag;
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
sizeof(*resp));
req->session_handle = boot_session_handle;
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)


@ -433,6 +433,9 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
{
	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_DEL_RPORT:
if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
tgt->action = action;
case IBMVFC_TGT_ACTION_DELETED_RPORT:
		break;
	default:
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
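The two added lines above, together with the new IBMVFC_TGT_ACTION_DELETED_RPORT case, mean a target that is being deleted can only move from DEL_RPORT to DELETED_RPORT and can never leave DELETED_RPORT, which is what prevents the rport from being deleted twice. A self-contained sketch of that transition rule (enum values trimmed to the ones visible in this diff; the default branch of the real function is cut off by the hunk, so it is assumed to keep allowing other transitions):

#include <stdbool.h>

/* Trimmed mirror of enum ibmvfc_target_action from ibmvfc.h below. */
enum tgt_action {
	TGT_ACTION_INIT,
	TGT_ACTION_INIT_WAIT,
	TGT_ACTION_DEL_RPORT,
	TGT_ACTION_DELETED_RPORT,
};

/* Returns true if ibmvfc_set_tgt_action() would accept the transition. */
static bool tgt_action_allowed(enum tgt_action cur, enum tgt_action next)
{
	switch (cur) {
	case TGT_ACTION_DEL_RPORT:
		/* Only "the rport has now been deleted" may follow. */
		return next == TGT_ACTION_DELETED_RPORT;
	case TGT_ACTION_DELETED_RPORT:
		/* Terminal state: nothing overwrites it. */
		return false;
	default:
		/* Assumption: other states keep accepting new actions,
		 * as in the pre-patch code. */
		return true;
	}
}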
@ -2036,178 +2039,22 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
}

/**
 * ibmvfc_match_rport - Match function for specified remote port
 * @evt: ibmvfc event struct
 * @device: device to match (rport)
 *
 * Returns:
 *	1 if event matches rport / 0 if event does not match rport
 **/
static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
{
	struct fc_rport *cmd_rport;

	if (evt->cmnd) {
		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
		if (cmd_rport == rport)
			return 1;
	}
	return 0;
}

/**
 * ibmvfc_abort_task_set - Abort outstanding commands to the device
 * @sdev: scsi device to abort commands
 *
 * This sends an Abort Task Set to the VIOS for the specified device. This does
 * NOT send any cancel to the VIOS. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
	int rsp_rc = -EBUSY;
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(evt, &vhost->sent, queue) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
}
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
tmf = &evt->iu.cmd;
memset(tmf, 0, sizeof(*tmf));
tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
tmf->resp.len = sizeof(tmf->rsp);
tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
tmf->payload_len = sizeof(tmf->iu);
tmf->resp_len = sizeof(tmf->rsp);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->tgt_scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->iu.lun);
tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
wait_for_completion(&evt->comp);
if (rsp_iu.cmd.status)
rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = fc_rsp->data.info.rsp_code;
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
sdev_printk(KERN_INFO, sdev, "Abort successful\n");
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rsp_rc;
}
/**
* ibmvfc_cancel_all - Cancel all outstanding commands to the device
* @sdev: scsi device to cancel commands
* @type: type of error recovery being performed
*
* This sends a cancel to the VIOS for the specified device. This does
* NOT send any abort to the actual device. That must be done separately.
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
struct fc_rport *rport = starget_to_rport(starget);
struct ibmvfc_tmf *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
int rsp_rc = -EBUSY;
unsigned long flags;
u16 status;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
list_for_each_entry(evt, &vhost->sent, queue) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
tmf->common.version = 1;
tmf->common.opcode = IBMVFC_TMF_MAD;
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->lun);
tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->my_cancel_key = (unsigned long)starget->hostdata;
evt->sync_iu = &rsp;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
wait_for_completion(&evt->comp);
status = rsp.mad_common.status;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (status != IBMVFC_MAD_SUCCESS) {
sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	return 0;
}
@ -2296,6 +2143,217 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
	return SUCCESS;
}
/**
* ibmvfc_cancel_all - Cancel all outstanding commands to the device
* @sdev: scsi device to cancel commands
* @type: type of error recovery being performed
*
* This sends a cancel to the VIOS for the specified device. This does
* NOT send any abort to the actual device. That must be done separately.
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
struct fc_rport *rport = starget_to_rport(starget);
struct ibmvfc_tmf *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
int rsp_rc = -EBUSY;
unsigned long flags;
u16 status;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
list_for_each_entry(evt, &vhost->sent, queue) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
tmf->common.version = 1;
tmf->common.opcode = IBMVFC_TMF_MAD;
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->lun);
tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->my_cancel_key = (unsigned long)starget->hostdata;
evt->sync_iu = &rsp;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
wait_for_completion(&evt->comp);
status = rsp.mad_common.status;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (status != IBMVFC_MAD_SUCCESS) {
sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
return 0;
}
/**
* ibmvfc_match_key - Match function for specified cancel key
* @evt: ibmvfc event struct
* @key: cancel key to match
*
* Returns:
* 1 if event matches key / 0 if event does not match key
**/
static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
{
unsigned long cancel_key = (unsigned long)key;
if (evt->crq.format == IBMVFC_CMD_FORMAT &&
evt->iu.cmd.cancel_key == cancel_key)
return 1;
return 0;
}
/**
* ibmvfc_abort_task_set - Abort outstanding commands to the device
* @sdev: scsi device to abort commands
*
* This sends an Abort Task Set to the VIOS for the specified device. This does
* NOT send any cancel to the VIOS. That must be done separately.
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp_iu;
struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
int rc, rsp_rc = -EBUSY;
unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
int rsp_code = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
list_for_each_entry(evt, &vhost->sent, queue) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
tmf = &evt->iu.cmd;
memset(tmf, 0, sizeof(*tmf));
tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
tmf->resp.len = sizeof(tmf->rsp);
tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
tmf->payload_len = sizeof(tmf->iu);
tmf->resp_len = sizeof(tmf->rsp);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->tgt_scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->iu.lun);
tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
timeout = wait_for_completion_timeout(&evt->comp, timeout);
if (!timeout) {
rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
if (!rc) {
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
if (rc == SUCCESS)
rc = 0;
}
if (rc) {
sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
ibmvfc_reset_host(vhost);
rsp_rc = 0;
goto out;
}
}
if (rsp_iu.cmd.status)
rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = fc_rsp->data.info.rsp_code;
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
sdev_printk(KERN_INFO, sdev, "Abort successful\n");
out:
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rsp_rc;
}
/**
 * ibmvfc_eh_abort_handler - Abort a command
 * @cmd: scsi command to abort
@ -2350,18 +2408,6 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
	return rc;
}
/**
* ibmvfc_dev_cancel_all_abts - Device iterated cancel all function
* @sdev: scsi device struct
* @data: return code
*
**/
static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data)
{
unsigned long *rc = data;
*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
}
/**
 * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
 * @sdev: scsi device struct
@ -2374,18 +2420,6 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
}
/**
* ibmvfc_dev_abort_all - Device iterated abort task set function
* @sdev: scsi device struct
* @data: return code
*
**/
static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
{
unsigned long *rc = data;
*rc |= ibmvfc_abort_task_set(sdev);
}
/**
 * ibmvfc_eh_target_reset_handler - Reset the target
 * @cmd: scsi command struct
@ -2440,19 +2474,22 @@ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 **/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
	struct scsi_target *starget = to_scsi_target(&rport->dev);
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long cancel_rc = 0;
	unsigned long abort_rc = 0;
	int rc = FAILED;

	ENTER;
	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts);
	starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);

	if (!cancel_rc && !abort_rc)
		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);

	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *dev_rport;
	struct scsi_device *sdev;
	unsigned long rc;

	ENTER;
	shost_for_each_device(sdev, shost) {
		dev_rport = starget_to_rport(scsi_target(sdev));
		if (dev_rport != rport)
			continue;
		ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
		ibmvfc_abort_task_set(sdev);
	}

	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);

	if (rc == FAILED)
		ibmvfc_issue_fc_host_lip(shost);
@ -4193,11 +4230,15 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
		tgt_dbg(tgt, "Deleting rport\n");
		list_del(&tgt->queue);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		fc_remote_port_delete(rport);
		del_timer_sync(&tgt->timer);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
	}

	if (rport) {
@ -4297,6 +4338,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
		rport = tgt->rport;
		tgt->rport = NULL;
		list_del(&tgt->queue);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		if (rport)
			fc_remote_port_delete(rport);


@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.8"
#define IBMVFC_DRIVER_VERSION "1.0.9"
#define IBMVFC_DRIVER_DATE "(June 17, 2010)"
#define IBMVFC_DRIVER_DATE "(August 5, 2010)"
#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@ -38,6 +38,7 @@
#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
(IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
#define IBMVFC_INIT_TIMEOUT 120
#define IBMVFC_ABORT_TIMEOUT 8
#define IBMVFC_ABORT_WAIT_TIMEOUT 40
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
@ -597,6 +598,7 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
IBMVFC_TGT_ACTION_DEL_RPORT,
IBMVFC_TGT_ACTION_DELETED_RPORT,
};
struct ibmvfc_target {


@ -1765,14 +1765,14 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
struct fcoe_dev_stats *stats;
lport = shost_priv(sc_cmd->device->host);
spin_unlock_irq(lport->host->host_lock);
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
done(sc_cmd);
goto out;
return 0;
}
spin_unlock_irq(lport->host->host_lock);
if (!*(struct fc_remote_port **)rport->dd_data) {
/*


@ -775,6 +775,7 @@ struct lpfc_hba {
uint8_t temp_sensor_support;
/* Fields used for heart beat. */
unsigned long last_completion_time;
unsigned long skipped_hb;
struct timer_list hb_tmofunc;
uint8_t hb_outstanding;
enum hba_temp_state over_temp_state;
@ -817,6 +818,8 @@ struct lpfc_hba {
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
uint8_t fips_spec_rev;
uint8_t fips_level;
};
static inline struct Scsi_Host *


@ -1239,6 +1239,44 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
/**
* lpfc_fips_level_show - Return the current FIPS level for the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
}
/**
* lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
}
/**
 * lpfc_param_show - Return a cfg attribute value in decimal
 *
@ -1677,6 +1715,8 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@ -3278,7 +3318,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
# - Default will result in registering capabilities for all profiles.
#
*/
unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION;
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
module_param(lpfc_prot_mask, uint, 0);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
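Changing the default from SHOST_DIX_TYPE0_PROTECTION to SHOST_DIF_TYPE1_PROTECTION only changes the value of the module parameter; it takes effect when the driver hands the mask to the SCSI midlayer. A hedged sketch of that wiring, using the standard scsi_host_set_prot()/scsi_host_set_guard() calls (the call site and the demo_ helper are assumptions, not part of this hunk):

#include <scsi/scsi_host.h>

extern unsigned int lpfc_prot_mask;	/* module parameter declared above */

static void demo_apply_prot_mask(struct Scsi_Host *shost)
{
	/* Advertise the configured DIF/DIX capabilities to the midlayer. */
	scsi_host_set_prot(shost, lpfc_prot_mask);

	/* DIF type 1 uses the T10 CRC guard tag, so advertise CRC support. */
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
}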
@ -3383,6 +3423,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_iocb_hw,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
NULL,
};
@ -3409,6 +3451,8 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
NULL,
};
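The lpfc_fips_level and lpfc_fips_rev attributes registered above report values that, per the CONFIG_PORT mailbox change in lpfc_hw.h later in this diff, share one 32-bit word with sec_err and max_vpi. A self-contained sketch of how that word unpacks, assuming the usual GCC rule that the first little-endian bitfield occupies the least significant bits (masks and helper name are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Little-endian CONFIG_PORT word per the lpfc_hw.h hunk below:
 * bits 0..15 max_vpi, 16..24 sec_err, 25..28 fips_level, 29..31 fips_rev. */
struct cfg_port_caps {
	uint16_t max_vpi;
	uint16_t sec_err;
	uint8_t  fips_level;
	uint8_t  fips_rev;
};

static struct cfg_port_caps unpack_cfg_port_word(uint32_t w)
{
	struct cfg_port_caps c = {
		.max_vpi    =  w        & 0xffff,
		.sec_err    = (w >> 16) & 0x1ff,
		.fips_level = (w >> 25) & 0xf,
		.fips_rev   = (w >> 29) & 0x7,
	};
	return c;
}

int main(void)
{
	struct cfg_port_caps c = unpack_cfg_port_word(0x20010100u);

	printf("max_vpi=%u sec_err=%u fips_level=%u fips_rev=%u\n",
	       c.max_vpi, c.sec_err, c.fips_level, c.fips_rev);
	return 0;
}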


@ -2722,15 +2722,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
mbox_req->inExtWLen * sizeof(uint32_t));
}
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen *
sizeof(uint32_t);
pmboxq->out_ext_byte_len =
mbox_req->outExtWLen *
sizeof(uint32_t);
pmboxq->mbox_offset_word =
mbox_req->mbOffset;
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);


@ -82,8 +82,7 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
static inline void
lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
{
	/* actually returns 1 byte past dest */
	memcpy_toio( dest, src, bytes);
	__iowrite32_copy(dest, src, bytes);
}
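The replacement call guarantees SLIM is written with 32-bit accesses, which memcpy_toio() does not promise. Note that __iowrite32_copy()'s third argument is a count of 32-bit words rather than bytes, so a byte length is normally divided down first; a hedged sketch of that conversion (the demo_ wrapper is illustrative, not the driver's code):

#include <linux/io.h>
#include <linux/types.h>

/* Copy a byte-sized buffer to memory-mapped SLIM using 32-bit accesses.
 * The byte count is converted to a word count because __iowrite32_copy()
 * copies "count" 32-bit quantities; callers pass multiples of four. */
static inline void demo_copy_to_slim(void __iomem *dest, const void *src,
				     unsigned int bytes)
{
	__iowrite32_copy(dest, src, bytes / sizeof(u32));
}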
static inline void


@ -600,6 +600,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/*
* Driver needs to re-reg VPI in order for f/w
* to update the MAC address.
*/
lpfc_register_new_vport(phba, vport, ndlp);
return 0;
}
if (phba->sli_rev < LPFC_SLI_REV4) {
@ -801,9 +809,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
"2611 FLOGI failed on registered "
"FCF record fcf_index:%d, trying "
"to perform round robin failover\n",
phba->fcf.current_rec.fcf_indx);
"FCF record fcf_index(%d), status: "
"x%x/x%x, tmo:x%x, trying to perform "
"round robin failover\n",
phba->fcf.current_rec.fcf_indx,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
/*
@ -841,6 +852,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
/* FLOGI failure */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
@ -1291,6 +1308,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc, keepDID = 0;
int put_node;
int put_rport;
/* Fabric nodes can have the same WWPN so we don't bother searching
 * by WWPN. Just return the ndlp that was given to us.
@ -1379,6 +1398,28 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
/* Since we are swapping the ndlp passed in with the new one
* and the did has already been swapped, copy over the
* state and names.
*/
memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
sizeof(struct lpfc_name));
new_ndlp->nlp_state = ndlp->nlp_state;
/* Fix up the rport accordingly */
rport = ndlp->rport;
if (rport) {
rdata = rport->dd_data;
put_node = rdata->pnode != NULL;
put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
if (put_node)
lpfc_nlp_put(ndlp);
if (put_rport)
put_device(&rport->dev);
}
}
return new_ndlp;
}
@ -2880,6 +2921,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 0;
if (retry) {
if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
/* Stop retrying PLOGI and FDISC if in FCF discovery */
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2849 Stop retry ELS command "
"x%x to remote NPORT x%x, "
"Data: x%x x%x\n", cmd, did,
cmdiocb->retry, delay);
return 0;
}
}
/* Retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@ -6076,8 +6128,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0915 Register VPI failed: 0x%x\n",
mb->mbxStatus);
"0915 Register VPI failed : Status: x%x"
" upd bit: x%x \n", mb->mbxStatus,
mb->un.varRegVpi.upd);
if (phba->sli_rev == LPFC_SLI_REV4 &&
mb->un.varRegVpi.upd)
goto mbox_err_exit ;
switch (mb->mbxStatus) {
case 0x11: /* unsupported feature */
@ -6142,7 +6198,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else
lpfc_do_scr_ns_plogi(phba, vport);
}
mbox_err_exit:
/* Now, we decrement the ndlp reference count held for this
 * callback function
 */
@ -6387,6 +6443,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
spin_unlock_irq(shost->host_lock);
} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/*
* Driver needs to re-reg VPI in order for f/w
* to update the MAC address.
*/
lpfc_register_new_vport(phba, vport, ndlp);
return ;
}
if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)


@ -588,7 +588,7 @@ lpfc_work_done(struct lpfc_hba *phba)
(status &
HA_RXMASK));
}
if (pring->txq_cnt)
if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
lpfc_drain_txq(phba);
/*
 * Turn on Ring interrupts
@ -1852,8 +1852,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
/* If in fast failover, mark it's completed */
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
FCF_DISCOVERY);
phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2836 The new FCF record (x%x) "
@ -2651,7 +2650,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2778 Start FCF table scan at linkup\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc) {
@ -2660,6 +2658,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
goto out; goto out;
} }
/* Reset FCF roundrobin bmask for new discovery */
memset(phba->fcf.fcf_rr_bmask, 0,
sizeof(*phba->fcf.fcf_rr_bmask));
}
return;
@@ -5097,6 +5098,7 @@ static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
@@ -5104,6 +5106,9 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
}
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
spin_unlock_irq(shost->host_lock);
mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
@@ -5285,6 +5290,10 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
/* Reset FCF roundrobin bmask for new discovery */
memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc) {


@@ -2291,7 +2291,8 @@ typedef struct {
typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1;
uint32_t rsvd2:7;
uint32_t upd:1;
uint32_t sid:24;
uint32_t wwn[2];
uint32_t rsvd5;
@@ -2300,7 +2301,8 @@ typedef struct {
#else /* __LITTLE_ENDIAN */
uint32_t rsvd1;
uint32_t sid:24;
uint32_t upd:1;
uint32_t rsvd2:7;
uint32_t wwn[2];
uint32_t rsvd5;
uint16_t vpi;
@@ -2806,11 +2808,15 @@ typedef struct {
uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t fips_rev : 3; /* FIPS Spec Revision */
uint32_t fips_level : 4; /* FIPS Level */
uint32_t sec_err : 9; /* security crypto error */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
uint32_t sec_err : 9; /* security crypto error */
uint32_t fips_level : 4; /* FIPS Level */
uint32_t fips_rev : 3; /* FIPS Spec Revision */
#endif
} CONFIG_PORT_VAR;
@@ -3441,63 +3447,63 @@ struct sli3_bg_fields {
static inline uint32_t
lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
{
return (bgstat & BGS_BIDIR_BG_PROF_MASK) >>
BGS_BIDIR_BG_PROF_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
{
return (bgstat & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
BGS_BIDIR_ERR_COND_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_bg_prof(uint32_t bgstat)
{
return (bgstat & BGS_BG_PROFILE_MASK) >>
BGS_BG_PROFILE_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_invalid_prof(uint32_t bgstat)
{
return (bgstat & BGS_INVALID_PROF_MASK) >>
BGS_INVALID_PROF_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
{
return (bgstat & BGS_UNINIT_DIF_BLOCK_MASK) >>
BGS_UNINIT_DIF_BLOCK_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
{
return (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK) >>
BGS_HI_WATER_MARK_PRESENT_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_reftag_err(uint32_t bgstat)
{
return (bgstat & BGS_REFTAG_ERR_MASK) >>
BGS_REFTAG_ERR_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_apptag_err(uint32_t bgstat)
{
return (bgstat & BGS_APPTAG_ERR_MASK) >>
BGS_APPTAG_ERR_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_guard_err(uint32_t bgstat)
{
return (bgstat & BGS_GUARD_ERR_MASK) >>
BGS_GUARD_ERR_SHIFT;
}


@@ -1032,27 +1032,46 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If there is no heart beat outstanding, issue a heartbeat command */
if (phba->cfg_enable_hba_heartbeat) {
if (!phba->hb_outstanding) {
if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
(list_empty(&psli->mboxq))) {
pmboxq = mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!pmboxq) {
mod_timer(&phba->hb_tmofunc,
jiffies +
HZ * LPFC_HB_MBOX_INTERVAL);
return;
}
lpfc_heart_beat(phba, pmboxq);
pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
pmboxq->vport = phba->pport;
retval = lpfc_sli_issue_mbox(phba, pmboxq,
MBX_NOWAIT);
if (retval != MBX_BUSY &&
retval != MBX_SUCCESS) {
mempool_free(pmboxq,
phba->mbox_mem_pool);
mod_timer(&phba->hb_tmofunc,
jiffies +
HZ * LPFC_HB_MBOX_INTERVAL);
return;
}
phba->skipped_hb = 0;
phba->hb_outstanding = 1;
} else if (time_before_eq(phba->last_completion_time,
phba->skipped_hb)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2857 Last completion time not "
" updated in %d ms\n",
jiffies_to_msecs(jiffies
- phba->last_completion_time));
} else
phba->skipped_hb = jiffies;
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
return;
} else {
/*
@@ -3281,10 +3300,10 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
if (!ndlp)
return 0;
}
if (phba->pport->port_state < LPFC_FLOGI)
return NULL;
/* If virtual link is not yet instantiated ignore CVL */
if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC))
return NULL;
shost = lpfc_shost_from_vport(vport);
if (!shost)
@@ -3357,21 +3376,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
"evt_tag:x%x, fcf_index:x%x\n",
acqe_fcoe->event_tag,
acqe_fcoe->index);
/* If the FCF discovery is in progress, do nothing. */ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
spin_lock_irq(&phba->hbalock);
if (phba->hba_flag & FCF_DISC_INPROGRESS) {
spin_unlock_irq(&phba->hbalock);
break;
}
/* If fast FCF failover rescan event is pending, do nothing */
if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
spin_unlock_irq(&phba->hbalock);
break;
}
spin_unlock_irq(&phba->hbalock);
if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
/*
* During period of FCF discovery, read the FCF
* table record indexed by the event to update
@@ -3385,13 +3390,26 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
acqe_fcoe->index);
rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
}
/* If the FCF has been in discovered state, do nothing. */
/* If the FCF discovery is in progress, do nothing. */
spin_lock_irq(&phba->hbalock);
if (phba->hba_flag & FCF_DISC_INPROGRESS) {
spin_unlock_irq(&phba->hbalock);
break;
}
/* If fast FCF failover rescan event is pending, do nothing */
if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
spin_unlock_irq(&phba->hbalock);
break;
}
/* If the FCF has been in discovered state, do nothing. */
if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
spin_unlock_irq(&phba->hbalock);
break;
}
spin_unlock_irq(&phba->hbalock);
/* Otherwise, scan the entire FCF table and re-discover SAN */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2770 Start FCF table scan due to new FCF "
@@ -3417,13 +3435,9 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
"2549 FCF disconnected from network index 0x%x"
" tag 0x%x\n", acqe_fcoe->index,
acqe_fcoe->event_tag);
/* If the event is not for currently used fcf do nothing */ /*
if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) * If we are in the middle of FCF failover process, clear
break; * the corresponding FCF bit in the roundrobin bitmap.
/* We request port to rediscover the entire FCF table for
* a fast recovery from case that the current FCF record
* is no longer valid if we are not in the middle of FCF
* failover process already.
*/
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
@@ -3432,9 +3446,23 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
break;
}
spin_unlock_irq(&phba->hbalock);
/* If the event is not for currently used fcf do nothing */
if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
break;
/*
* Otherwise, request the port to rediscover the entire FCF
* table for a fast recovery from case that the current FCF
* is no longer valid as we are not in the middle of FCF
* failover process already.
*/
spin_lock_irq(&phba->hbalock);
/* Mark the fast failover process in progress */
phba->fcf.fcf_flag |= FCF_DEAD_DISC;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2771 Start FCF fast failover process due to "
"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
@@ -3454,12 +3482,16 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
* as a link down to FCF registration.
*/
lpfc_sli4_fcf_dead_failthrough(phba);
} else {
/* Reset FCF roundrobin bmask for new discovery */
memset(phba->fcf.fcf_rr_bmask, 0,
sizeof(*phba->fcf.fcf_rr_bmask));
/*
* Handling fast FCF failover to a DEAD FCF event is
* considered equalivant to receiving CVL to all vports.
*/
lpfc_sli4_perform_all_vport_cvl(phba);
}
break;
case LPFC_FCOE_EVENT_TYPE_CVL:
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -3534,7 +3566,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
* the current registered FCF entry.
*/
lpfc_retry_pport_discovery(phba);
} else
/*
* Reset FCF roundrobin bmask for new
* discovery.
*/
memset(phba->fcf.fcf_rr_bmask, 0,
sizeof(*phba->fcf.fcf_rr_bmask));
}
break;
default:


@@ -815,9 +815,15 @@ void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_hba *phba = vport->phba;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
/*
* Set the re-reg VPI bit for f/w to update the MAC address.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
mb->un.varRegVpi.upd = 1;
mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
mb->un.varRegVpi.sid = vport->fc_myDID;
mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;


@@ -1325,7 +1325,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
pde5->reftag = reftag;
/* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
pde5->reftag = cpu_to_le32(pde5->reftag);
@@ -1347,7 +1347,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_ai, pde6, 1);
bf_set(pde6_apptagval, pde6, apptagval);
/* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
pde6->word1 = cpu_to_le32(pde6->word1);
pde6->word2 = cpu_to_le32(pde6->word2);
@@ -1459,7 +1459,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
pde5->reftag = reftag;
/* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
pde5->reftag = cpu_to_le32(pde5->reftag);
@@ -1479,7 +1479,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_ai, pde6, 1);
bf_set(pde6_apptagval, pde6, apptagval);
/* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
pde6->word1 = cpu_to_le32(pde6->word1);
pde6->word2 = cpu_to_le32(pde6->word2);


@@ -1046,7 +1046,7 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
} else
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
psli->last_iotag);
@@ -3914,7 +3914,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
LPFC_SLI3_HBQ_ENABLED |
LPFC_SLI3_CRP_ENABLED |
LPFC_SLI3_BG_ENABLED |
LPFC_SLI3_DSS_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
@@ -3949,8 +3950,23 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
} else
phba->max_vpi = 0;
phba->fips_level = 0;
phba->fips_spec_rev = 0;
if (pmb->u.mb.un.varCfgPort.gdss) {
phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2850 Security Crypto Active. FIPS x%d "
"(Spec Rev: x%d)",
phba->fips_level, phba->fips_spec_rev);
}
if (pmb->u.mb.un.varCfgPort.sec_err) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2856 Config Port Security Crypto "
"Error: x%x ",
pmb->u.mb.un.varCfgPort.sec_err);
}
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
@@ -9040,6 +9056,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
switch (bf_get(lpfc_cqe_code, &cqevt)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ/RQ complete event */
phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
(struct lpfc_wcqe_complete *)&cqevt);
break;
@@ -9050,11 +9067,13 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
case CQE_CODE_XRI_ABORTED:
/* Process the WQ XRI abort event */
phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&cqevt);
break;
case CQE_CODE_RECEIVE:
/* Process the RQ event */
phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_rcqe(phba,
(struct lpfc_rcqe *)&cqevt);
break;
@@ -9276,7 +9295,6 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
{
struct lpfc_wcqe_release wcqe;
bool workposted = false;
/* Copy the work queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
@@ -9285,9 +9303,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ complete event */
phba->last_completion_time = jiffies;
lpfc_sli4_fp_handle_fcp_wcqe(phba,
(struct lpfc_wcqe_complete *)&wcqe);
break;
@@ -9298,6 +9314,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
case CQE_CODE_XRI_ABORTED:
/* Process the WQ XRI abort event */
phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&wcqe);
break;
@@ -12278,12 +12295,9 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
/* Reset eligible FCF count for new scan */
if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
phba->fcf.eligible_fcf_cnt = 0;
error = 0;
}
fail_fcf_scan:


@@ -18,7 +18,7 @@
 * included with this package.                                     *
 *******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.16"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"


@@ -4199,8 +4199,10 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc) {
kfree(fw_control_context);
return rc;
}
ccb = &pm8001_ha->ccb_info[tag];
ccb->ccb_tag = tag;
ccb->fw_control_context = fw_control_context;
@@ -4276,8 +4278,10 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
ioctl_payload->length);
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc) {
kfree(fw_control_context);
return rc;
}
ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
ccb->ccb_tag = tag;
@@ -4387,6 +4391,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
fw_control->len, 0) != 0) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Mem alloc failure\n"));
kfree(fw_control_context);
return -ENOMEM;
}
}
@@ -4401,8 +4406,10 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
fw_control_context->virtAddr = buffer;
fw_control_context->len = fw_control->len;
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc) {
kfree(fw_control_context);
return rc;
}
ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
ccb->ccb_tag = tag;
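
The three pm8001 hunks above make the same fix: fw_control_context is allocated before pm8001_tag_alloc() (or the DMA copy) can fail, so each early error return now frees it instead of leaking it. A minimal sketch of the pattern follows; the helper signature and the request-building details are simplified stand-ins, not the driver's actual code.

/* Sketch only: once the context is allocated, every early error
 * return must free it before bailing out. */
static int pm8001_issue_req(struct pm8001_hba_info *pm8001_ha,
			    void *fw_control_context)
{
	u32 tag;
	int rc;

	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc) {
		kfree(fw_control_context);	/* was leaked on this path before the patch */
		return rc;
	}
	pm8001_ha->ccb_info[tag].ccb_tag = tag;
	pm8001_ha->ccb_info[tag].fw_control_context = fw_control_context;
	/* ... build the command and post it to the inbound queue ... */
	return 0;
}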


@@ -36,6 +36,24 @@
#include "ql4_dbg.h"
#include "ql4_nx.h"
#if defined(CONFIG_PCIEAER)
#include <linux/aer.h>
#else
/* AER releated */
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
return -EINVAL;
}
#endif
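
The #if/#else block above keeps ql4_def.h building whether or not the kernel has CONFIG_PCIEAER: with AER configured in it pulls in <linux/aer.h>, otherwise it supplies inert stubs that return -EINVAL, so the AER calls added later in this patch need no #ifdefs of their own. A hedged sketch of the same compile-time fallback pattern, with invented feature names used purely for illustration:

/* Real API when the feature is configured in, no-op stubs otherwise. */
#if defined(CONFIG_EXAMPLE_FEATURE)
#include <linux/example_feature.h>
#else
static inline int example_feature_enable(struct pci_dev *dev)
{
	return -EINVAL;		/* feature compiled out */
}
static inline int example_feature_disable(struct pci_dev *dev)
{
	return -EINVAL;
}
#endif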
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
#endif
@@ -137,6 +155,9 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
/* recovery timeout */
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -249,7 +270,6 @@ struct ddb_entry {
uint32_t default_time2wait; /* Default Min time between
* relogins (+aens) */
atomic_t port_down_timer; /* Device connection timer */
atomic_t retry_relogin_timer; /* Min Time between relogins
* (4000 only) */
atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
@@ -378,7 +398,9 @@ struct scsi_qla_host {
#define AF_MSI_ENABLED 16 /* 0x00010000 */
#define AF_MSIX_ENABLED 17 /* 0x00020000 */
#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
unsigned long dpc_flags;
@@ -474,7 +496,6 @@
uint32_t timer_active;
/* Recovery Timers */
uint32_t port_down_retry_count;
uint32_t discovery_wait;
atomic_t check_relogin_timeouts;
uint32_t retry_reset_ha_cnt;
@@ -615,6 +636,15 @@ static inline int is_qla8022(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
}
/* Note: Currently AER/EEH is now supported only for 8022 cards
* This function needs to be updated when AER/EEH is enabled
* for other cards.
*/
static inline int is_aer_supported(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
}
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&


@@ -673,17 +673,17 @@ struct flash_sys_info {
}; /* 200 */
struct mbx_sys_info {
uint8_t board_id_str[16]; /* 0-f Keep board ID string first */
/* in this structure for GUI. */
uint16_t board_id; /* 10-11 board ID code */
uint16_t phys_port_cnt; /* 12-13 number of physical network ports */
uint16_t port_num; /* 14-15 network port for this PCI function */
/* (port 0 is first port) */
uint8_t mac_addr[6]; /* 16-1b MAC address for this PCI function */
uint32_t iscsi_pci_func_cnt; /* 1c-1f number of iSCSI PCI functions */
uint32_t pci_func; /* 20-23 this PCI function */
unsigned char serial_number[16]; /* 24-33 serial number string */
uint8_t reserved[12]; /* 34-3f */
};
struct crash_record {


@@ -93,6 +93,7 @@ void qla4xxx_free_irqs(struct scsi_qla_host *ha);
void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
void qla4_8xxx_pci_config(struct scsi_qla_host *);
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
@@ -131,6 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
inline void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;


@@ -308,7 +308,6 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
DEBUG2(printk("scsi%ld: %s: unable to get firmware "
"state\n", ha->host_no, __func__));
break;
}
if (ha->firmware_state & FW_STATE_ERROR) {
@@ -445,6 +444,16 @@
{
int status = QLA_ERROR;
if (is_aer_supported(ha) &&
test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
return status;
/* For 82xx, stop firmware before initializing because if BIOS
* has previously initialized firmware, then driver's initialize
* firmware will fail. */
if (is_qla8022(ha))
qla4_8xxx_stop_firmware(ha);
ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
@@ -669,7 +678,6 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
}
ddb_entry->fw_ddb_index = fw_ddb_index;
atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
atomic_set(&ddb_entry->relogin_retry_count, 0);
@@ -1556,8 +1564,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
/* Device is back online. */
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
atomic_set(&ddb_entry->port_down_timer,
ha->port_down_retry_count);
atomic_set(&ddb_entry->relogin_retry_count, 0);
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);


@@ -19,7 +19,7 @@ qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
/* Calculate number of free request entries. */
if ((req_cnt + 2) >= ha->req_q_count) {
cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
if (ha->request_in < cnt)
ha->req_q_count = cnt - ha->request_in;
else


@@ -816,6 +816,9 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
unsigned long flags = 0;
uint8_t reqs_count = 0;
if (unlikely(pci_channel_offline(ha->pdev)))
return IRQ_HANDLED;
ha->isr_count++;
status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
if (!(status & ha->nx_legacy_intr.int_vec_bit))


@@ -39,6 +39,22 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
"pointer\n", ha->host_no, __func__));
return status;
}
if (is_qla8022(ha) &&
test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely "
"completing mbx cmd as firmware recovery detected\n",
ha->host_no, __func__));
return status;
}
if ((is_aer_supported(ha)) &&
(test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
"timeout MBX Exiting.\n", ha->host_no, __func__));
return status;
}
/* Mailbox code active */
wait_count = MBOX_TOV * 100;
@@ -150,6 +166,7 @@
while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
if (time_after_eq(jiffies, wait_count))
break;
/*
* Service the interrupt.
* The ISR will save the mailbox status registers
@@ -196,6 +213,14 @@
/* Check for mailbox timeout. */
if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
if (is_qla8022(ha) &&
test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: prematurely completing mbx cmd as "
"firmware recovery detected\n",
ha->host_no, __func__));
goto mbox_exit;
}
DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...," DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
" Scheduling Adapter Reset\n", ha->host_no, " Scheduling Adapter Reset\n", ha->host_no,
mbx_cmd[0])); mbx_cmd[0]));
@ -246,6 +271,28 @@ mbox_exit:
return status; return status;
} }
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
set_bit(AF_FW_RECOVERY, &ha->flags);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
ha->host_no, __func__);
if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
complete(&ha->mbx_intr_comp);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
"recovery, doing premature completion of "
"mbx cmd\n", ha->host_no, __func__);
} else {
set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
"recovery, doing premature completion of "
"polling mbx cmd\n", ha->host_no, __func__);
}
}
}
static uint8_t
qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
@@ -361,7 +408,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
/* Save Command Line Paramater info */
ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout);
ha->discovery_wait = ql4xdiscoverywait;
if (ha->acb_version == ACB_SUPPORTED) {


@@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}
inline void
qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
{
uint32_t drv_active;
@@ -1441,11 +1441,15 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
static inline int
qla4_8xxx_need_reset(struct scsi_qla_host *ha)
{
uint32_t drv_state, drv_active;
int rval;
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
rval = drv_state & (1 << (ha->func_num * 4));
if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
rval = 1;
return rval;
}
@@ -1949,7 +1953,8 @@ qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
uint16_t cnt, chksum;
uint16_t *wptr;
struct qla_fdt_layout *fdt;
uint16_t mid = 0;
uint16_t fid = 0;
struct ql82xx_hw_data *hw = &ha->hw;
hw->flash_conf_off = FARX_ACCESS_FLASH_CONF;
@@ -2105,6 +2110,9 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
qla4_8xxx_clear_rst_ready(ha);
qla4_8xxx_idc_unlock(ha);
if (rval == QLA_SUCCESS)
clear_bit(AF_FW_RECOVERY, &ha->flags);
return rval;
}
@@ -2145,7 +2153,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
goto exit_validate_mac82;
}
/* Make sure we receive the minimum required data to cache internally */
if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4])); " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
goto exit_validate_mac82; goto exit_validate_mac82;


@@ -163,10 +163,10 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout "
"of (%d) secs exhausted, marking device DEAD.\n",
ha->host_no, __func__, ddb_entry->fw_ddb_index,
QL4_SESS_RECOVERY_TMO));
qla4xxx_wake_dpc(ha);
}
@@ -298,7 +298,8 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
{
int err;
ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO;
err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
if (err) {
DEBUG2(printk(KERN_ERR "Could not add session.\n"));
@@ -474,6 +475,14 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
struct srb *srb;
int rval;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
cmd->result = DID_NO_CONNECT << 16;
else
cmd->result = DID_REQUEUE << 16;
goto qc_fail_command;
}
if (!sess) {
cmd->result = DID_IMM_RETRY << 16;
goto qc_fail_command;
@@ -654,6 +663,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
uint32_t fw_heartbeat_counter, halt_status;
fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
if (fw_heartbeat_counter == 0xffffffff) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
"state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
ha->host_no, __func__));
return;
}
if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
ha->seconds_since_last_heartbeat++;
@@ -662,6 +678,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
ha->seconds_since_last_heartbeat = 0;
halt_status = qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
* DPC */
@@ -673,6 +690,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
qla4xxx_wake_dpc(ha);
qla4xxx_mailbox_premature_completion(ha);
}
}
ha->fw_heartbeat_counter = fw_heartbeat_counter;
@@ -698,6 +716,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
ha->host_no, __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
qla4xxx_mailbox_premature_completion(ha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
printk("scsi%ld: %s: HW State: NEED QUIES!\n",
@@ -719,6 +738,19 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
{
struct ddb_entry *ddb_entry, *dtemp;
int start_dpc = 0;
uint16_t w;
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
mod_timer(&ha->timer, jiffies + HZ);
return;
}
/* Hardware read to trigger an EEH error during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n",
@@ -1207,7 +1239,13 @@ static void qla4xxx_do_dpc(struct work_struct *work)
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
goto do_dpc_exit;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
ha->host_no, __func__, ha->flags));
goto do_dpc_exit;
}
/* HBA is in the process of being permanently disabled.
* Don't process anything */
@@ -1346,6 +1384,8 @@ dpc_post_reset_ha:
}
}
}
do_dpc_exit:
clear_bit(AF_DPC_SCHEDULED, &ha->flags);
}
@@ -1612,6 +1652,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host = host;
ha->host_no = host->host_no;
pci_enable_pcie_error_reporting(pdev);
/* Setup Runtime configurable options */
if (is_qla8022(ha)) {
ha->isp_ops = &qla4_8xxx_isp_ops;
@@ -1630,6 +1672,10 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->isp_ops = &qla4xxx_isp_ops;
}
/* Set EEH reset type to fundamental if required by hba */
if (is_qla8022(ha))
pdev->needs_freset = 1;
/* Configure PCI I/O space. */
ret = ha->isp_ops->iospace_config(ha);
if (ret)
@@ -1726,6 +1772,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
}
}
pci_save_state(ha->pdev);
ha->isp_ops->enable_intrs(ha);
/* Start timer thread. */
@@ -1752,6 +1799,7 @@ probe_failed:
qla4xxx_free_adapter(ha);
probe_failed_ioconfig:
pci_disable_pcie_error_reporting(pdev);
scsi_host_put(ha->host);
probe_disable_device:
@@ -1781,6 +1829,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
scsi_host_put(ha->host);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -1877,6 +1926,17 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
int done = 0;
struct srb *rp;
uint32_t max_wait_time = EH_WAIT_CMD_TOV;
int ret = SUCCESS;
/* Dont wait on command if PCI error is being handled
* by PCI AER driver
*/
if (unlikely(pci_channel_offline(ha->pdev)) ||
(test_bit(AF_EEH_BUSY, &ha->flags))) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
ha->host_no, __func__);
return ret;
}
do {
/* Checking to see if its returned to OS */
@@ -2172,6 +2232,252 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
return return_status;
}
/* PCI AER driver recovers from all correctable errors w/o
* driver intervention. For uncorrectable errors PCI AER
* driver calls the following device driver's callbacks
*
* - Fatal Errors - link_reset
* - Non-Fatal Errors - driver's pci_error_detected() which
* returns CAN_RECOVER, NEED_RESET or DISCONNECT.
*
* PCI AER driver calls
* CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
* returns RECOVERED or NEED_RESET if fw_hung
* NEED_RESET - driver's slot_reset()
* DISCONNECT - device is dead & cannot recover
* RECOVERED - driver's pci_resume()
*/
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
ha->host_no, __func__, state);
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
switch (state) {
case pci_channel_io_normal:
clear_bit(AF_EEH_BUSY, &ha->flags);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
set_bit(AF_EEH_BUSY, &ha->flags);
qla4xxx_mailbox_premature_completion(ha);
qla4xxx_free_irqs(ha);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
set_bit(AF_EEH_BUSY, &ha->flags);
set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
}
/**
* qla4xxx_pci_mmio_enabled() gets called if
* qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
* and read/write to the device still works.
**/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- "
"mmio_enabled\n", ha->host_no, __func__);
return PCI_ERS_RESULT_NEED_RESET;
} else
return PCI_ERS_RESULT_RECOVERED;
}
uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
int fn;
struct pci_dev *other_pdev = NULL;
ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
if (test_bit(AF_ONLINE, &ha->flags)) {
clear_bit(AF_ONLINE, &ha->flags);
qla4xxx_mark_all_devices_missing(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
}
fn = PCI_FUNC(ha->pdev->devfn);
while (fn > 0) {
fn--;
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
"func %x\n", ha->host_no, __func__, fn);
/* Get the pci device given the domain, bus,
* slot/function number */
other_pdev =
pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
fn));
if (!other_pdev)
continue;
if (atomic_read(&other_pdev->enable_cnt)) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
"func in enabled state%x\n", ha->host_no,
__func__, fn);
pci_dev_put(other_pdev);
break;
}
pci_dev_put(other_pdev);
}
/* The first function on the card, the reset owner will
* start & initialize the firmware. The other functions
* on the card will reset the firmware context
*/
if (!fn) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
"0x%x is the owner\n", ha->host_no, __func__,
ha->pdev->devfn);
qla4_8xxx_idc_lock(ha);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_COLD);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
QLA82XX_IDC_VERSION);
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"FAILED\n", ha->host_no, __func__);
qla4_8xxx_clear_drv_active(ha);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"READY\n", ha->host_no, __func__);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
/* Clear driver state register */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->enable_intrs(ha);
}
qla4_8xxx_idc_unlock(ha);
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
"the reset owner\n", ha->host_no, __func__,
ha->pdev->devfn);
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha,
PRESERVE_DDB_LIST);
if (rval == QLA_SUCCESS)
ha->isp_ops->enable_intrs(ha);
qla4_8xxx_idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
qla4_8xxx_idc_unlock(ha);
}
}
clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
return rval;
}
static pci_ers_result_t
qla4xxx_pci_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
int rc;
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
ha->host_no, __func__);
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
/* Restore the saved state of PCIe device -
* BAR registers, PCI Config space, PCIX, MSI,
* IOV states
*/
pci_restore_state(pdev);
/* pci_restore_state() clears the saved_state flag of the device
* save restored state which resets saved_state flag
*/
pci_save_state(pdev);
/* Initialize device or resume if in suspended state */
rc = pci_enable_device(pdev);
if (rc) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cant re-enable "
"device after reset\n", ha->host_no, __func__);
goto exit_slot_reset;
}
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
" already in use.\n", pdev->irq);
goto exit_slot_reset;
}
if (is_qla8022(ha)) {
if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
ret = PCI_ERS_RESULT_RECOVERED;
goto exit_slot_reset;
} else
goto exit_slot_reset;
}
exit_slot_reset:
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
"device after reset\n", ha->host_no, __func__, ret);
return ret;
}
static void
qla4xxx_pci_resume(struct pci_dev *pdev)
{
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
int ret;
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
ha->host_no, __func__);
ret = qla4xxx_wait_for_hba_online(ha);
if (ret != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
"resume I/O from slot/link_reset\n", ha->host_no,
__func__);
}
pci_cleanup_aer_uncorrect_error_status(pdev);
clear_bit(AF_EEH_BUSY, &ha->flags);
}
static struct pci_error_handlers qla4xxx_err_handler = {
.error_detected = qla4xxx_pci_error_detected,
.mmio_enabled = qla4xxx_pci_mmio_enabled,
.slot_reset = qla4xxx_pci_slot_reset,
.resume = qla4xxx_pci_resume,
};
static struct pci_device_id qla4xxx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
@@ -2206,6 +2512,7 @@ static struct pci_driver qla4xxx_pci_driver = {
.id_table = qla4xxx_pci_tbl,
.probe = qla4xxx_probe_adapter,
.remove = qla4xxx_remove_adapter,
.err_handler = &qla4xxx_err_handler,
};
static int __init qla4xxx_module_init(void)


@@ -5,4 +5,4 @@
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#define QLA4XXX_DRIVER_VERSION "5.02.00-k3"

View File

@@ -473,14 +473,17 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
*/
return SUCCESS;
case RESERVATION_CONFLICT:
if (scmd->cmnd[0] == TEST_UNIT_READY)
/* it is a success, we probed the device and
* found it */
return SUCCESS;
/* otherwise, we failed to send the command */
return FAILED;
case QUEUE_FULL:
scsi_handle_queue_full(scmd->device);
/* fall through */
case BUSY:
return NEEDS_RETRY;
default:
return FAILED;
}


@@ -185,6 +185,7 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction,
rq_data_dir(cmd->request));
scsi_unmap_user_pages(tcmd);
tcmd->rq->bio = NULL;
scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
}