Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6: (679 commits)
  commit 7676f83aeb
  Author: James Bottomley <James.Bottomley@steeleye.com>
  Date:   Fri Apr 14 09:47:59 2006 -0500
  
      [SCSI] scsi_transport_sas: don't scan a non-existent end device
      
      Any end device that can't support any of the scanning protocols
      shouldn't be scanned, so set its id to -1 to prevent
      scsi_scan_target() being called for it.
      
      Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
  
  commit 3c0c25b97c
  Author: Moore, Eric <Eric.Moore@lsil.com>
  Date:   Thu Apr 13 16:08:17 2006 -0600
  
      [SCSI] mptfusion - fix panic in mptsas_slave_configure
      
      Fix a driver panic that occurred when a RAID logical volume was
      present at driver load, or when a RAID logical volume was created
      on the fly.
  ...
Linus Torvalds 2006-04-14 17:09:08 -07:00
commit f05472f10d
43 changed files with 1095 additions and 13266 deletions

View File

@@ -350,16 +350,51 @@ out:
return ret;
}
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
* @file: file this ioctl operates on (optional)
* @q: request queue to send scsi commands down
* @disk: gendisk to operate on (optional)
* @sic: userspace structure describing the command to perform
*
* Send down the scsi command described by @sic to the device below
* the request queue @q. If @file is non-NULL it's used to perform
* fine-grained permission checks that allow users to send down
* non-destructive SCSI commands. If the caller has a struct gendisk
* available it should be passed in as @disk to allow the low level
* driver to use the information contained in it. A non-NULL @disk
* is only allowed if the caller knows that the low level driver doesn't
* need it (e.g. in the scsi subsystem).
*
* Notes:
* - This interface is deprecated - users should use the SG_IO
* interface instead, as this is a more flexible approach to
* performing SCSI commands on a device.
* - The SCSI command length is determined by examining the 1st byte
* of the given command. There is no way to override this.
* - Data transfers are limited to PAGE_SIZE
* - The length (x + y) must be at least OMAX_SB_LEN bytes long to
* accommodate the sense buffer when an error occurs.
* The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
* old code will not be surprised.
* - If a Unix error occurs (e.g. ENOMEM) then the user will receive
* a negative return and the Unix error code in 'errno'.
* If the SCSI command succeeds then 0 is returned.
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
*/
#define OMAX_SB_LEN 16 /* For backward compatibility */
static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
int sg_scsi_ioctl(struct file *file, struct request_queue *q,
struct gendisk *disk, struct scsi_ioctl_command __user *sic)
{
struct request *rq;
int err;
unsigned int in_len, out_len, bytes, opcode, cmdlen;
char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
if (!sic)
return -EINVAL;
/*
* get in and out lengths, verify they don't exceed a page worth of data
*/
@@ -393,45 +428,53 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
if (copy_from_user(rq->cmd, sic->data, cmdlen))
goto error;
if (copy_from_user(buffer, sic->data + cmdlen, in_len))
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
err = verify_command(file, rq->cmd);
if (err)
goto error;
/* default; possibly overridden later */
rq->retries = 5;
switch (opcode) {
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
rq->timeout = FORMAT_UNIT_TIMEOUT;
break;
case START_STOP:
rq->timeout = START_STOP_TIMEOUT;
break;
case MOVE_MEDIUM:
rq->timeout = MOVE_MEDIUM_TIMEOUT;
break;
case READ_ELEMENT_STATUS:
rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
break;
case READ_DEFECT_DATA:
rq->timeout = READ_DEFECT_DATA_TIMEOUT;
break;
default:
rq->timeout = BLK_DEFAULT_TIMEOUT;
break;
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
rq->timeout = FORMAT_UNIT_TIMEOUT;
rq->retries = 1;
break;
case START_STOP:
rq->timeout = START_STOP_TIMEOUT;
break;
case MOVE_MEDIUM:
rq->timeout = MOVE_MEDIUM_TIMEOUT;
break;
case READ_ELEMENT_STATUS:
rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
break;
case READ_DEFECT_DATA:
rq->timeout = READ_DEFECT_DATA_TIMEOUT;
rq->retries = 1;
break;
default:
rq->timeout = BLK_DEFAULT_TIMEOUT;
break;
}
if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
err = DRIVER_ERROR << 24;
goto out;
}
memset(sense, 0, sizeof(sense));
rq->sense = sense;
rq->sense_len = 0;
rq->data = buffer;
rq->data_len = bytes;
rq->flags |= REQ_BLOCK_PC;
rq->retries = 0;
blk_execute_rq(q, bd_disk, rq, 0);
blk_execute_rq(q, disk, rq, 0);
out:
err = rq->errors & 0xff; /* only 8 bit SCSI status */
if (err) {
if (rq->sense_len && rq->sense) {
@@ -450,7 +493,7 @@ error:
blk_put_request(rq);
return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
/* Send basic block requests */
static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
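
The kernel-doc above only hints at how the packed return value is consumed. A minimal userspace sketch (illustrative, not part of this merge; the device path and buffer size are assumptions, and the host/driver byte positions assume the usual status/message/host/driver packing, the doc above only guarantees that the lowest byte is the SCSI status):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>		/* SCSI_IOCTL_SEND_COMMAND */

int main(void)
{
	struct {
		unsigned int inlen;	/* bytes sent after the CDB */
		unsigned int outlen;	/* bytes expected back */
		unsigned char data[16];	/* CDB + room for OMAX_SB_LEN sense */
	} cmd;
	int fd = open("/dev/sda", O_RDONLY);
	int ret;

	if (fd < 0)
		return 1;
	/* opcode 0x00 = TEST UNIT READY; the CDB length (6) is derived
	 * from this first byte, exactly as the notes above describe */
	memset(&cmd, 0, sizeof(cmd));
	ret = ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &cmd);
	if (ret < 0)
		perror("ioctl");	/* Unix error, code in errno */
	else if (ret > 0)		/* compacted SCSI error code */
		printf("status=%#x host=%#x driver=%#x\n",
		       ret & 0xff, (ret >> 16) & 0xff, (ret >> 24) & 0xff);
	return 0;
}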

View File

@@ -366,7 +366,15 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
static int
mptsas_slave_configure(struct scsi_device *sdev)
{
sas_read_port_mode_page(sdev);
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
/*
* RAID volumes are placed beyond the last expected port.
* Don't send SAS mode pages in that case.
*/
if (sdev->channel < hd->ioc->num_ports)
sas_read_port_mode_page(sdev);
return mptscsih_slave_configure(sdev);
}

View File

@@ -65,6 +65,7 @@
2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
2.26.02.006 - Fix 9550SX pchip reset timeout.
Add big endian support.
2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
*/
#include <linux/module.h>
@@ -88,7 +89,7 @@
#include "3w-9xxx.h"
/* Globals */
#define TW_DRIVER_VERSION "2.26.02.006"
#define TW_DRIVER_VERSION "2.26.02.007"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -1942,9 +1943,13 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
}
if (tw_dev->srb[request_id]->use_sg == 1) {
struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
char *buf;
unsigned long flags = 0;
local_irq_save(flags);
buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
local_irq_restore(flags);
}
}
} /* End twa_scsiop_execute_scsi_complete() */
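
For context on the 3ware fix above: with the 2.6-era highmem API, the KM_IRQ0/KM_IRQ1 kmap slots are shared with interrupt context on the same CPU, so a mapping taken with those slots must keep local interrupts disabled for its whole lifetime. A stripped-down sketch of the pattern the hunk applies (the helper name is illustrative, not the driver's):

#include <linux/highmem.h>	/* kmap_atomic/kunmap_atomic, KM_IRQ0 */
#include <linux/interrupt.h>	/* local_irq_save/local_irq_restore */
#include <linux/scatterlist.h>
#include <linux/string.h>

static void copy_to_sg_page(struct scatterlist *sg, const void *src,
			    size_t len)
{
	unsigned long flags;
	char *buf;

	local_irq_save(flags);		/* no IRQ may reuse the KM_IRQ0 slot */
	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	memcpy(buf, src, len);
	kunmap_atomic(buf - sg->offset, KM_IRQ0);  /* unmap the page base */
	local_irq_restore(flags);
}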

View File

@@ -1079,7 +1079,7 @@ config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
memory using PCI DAC cycles.
config SCSI_SYM53C8XX_DEFAULT_TAGS
int "default tagged command queue depth"
int "Default tagged command queue depth"
depends on SCSI_SYM53C8XX_2
default "16"
help
@@ -1090,7 +1090,7 @@ config SCSI_SYM53C8XX_DEFAULT_TAGS
exceed CONFIG_SCSI_SYM53C8XX_MAX_TAGS.
config SCSI_SYM53C8XX_MAX_TAGS
int "maximum number of queued commands"
int "Maximum number of queued commands"
depends on SCSI_SYM53C8XX_2
default "64"
help
@@ -1099,13 +1099,14 @@ config SCSI_SYM53C8XX_MAX_TAGS
possible. The driver supports up to 256 queued commands per device.
This value is used as a compiled-in hard limit.
config SCSI_SYM53C8XX_IOMAPPED
bool "use port IO"
config SCSI_SYM53C8XX_MMIO
bool "Use memory mapped IO"
depends on SCSI_SYM53C8XX_2
default y
help
If you say Y here, the driver will use port IO to access
the card. This is significantly slower than using memory
mapped IO. Most people should answer N.
Memory mapped IO is faster than Port IO. Most people should
answer Y here, but some machines may have problems. If you have
to answer N here, please report the problem to the maintainer.
config SCSI_IPR
tristate "IBM Power Linux RAID adapter support"
@@ -1309,15 +1310,6 @@ config SCSI_QLOGIC_FAS
To compile this driver as a module, choose M here: the
module will be called qlogicfas.
config SCSI_QLOGIC_FC
tristate "Qlogic ISP FC SCSI support"
depends on PCI && SCSI
help
This is a driver for the QLogic ISP2100 SCSI-FCP host adapter.
To compile this driver as a module, choose M here: the
module will be called qlogicfc.
config SCSI_QLOGIC_FC_FIRMWARE
bool "Include loadable firmware in driver"
depends on SCSI_QLOGIC_FC

View File

@@ -78,7 +78,6 @@ obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
obj-$(CONFIG_SCSI_SYM53C416) += sym53c416.o
obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_FC) += qlogicfc.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/

View File

@@ -149,20 +149,20 @@ static int dacmode = -1;
static int commit = -1;
module_param(nondasd, int, 0);
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
module_param(dacmode, int, 0);
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
module_param(commit, int, 0);
module_param(commit, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid\nvalues are 512 and down. Default is to use suggestion from Firmware.");
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware.");
int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512,\n2048, 4096 and 8192. Default is to use suggestion from Firmware.");
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
/**
* aac_get_config_status - check the adapter configuration
* @common: adapter to query
@@ -387,6 +387,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
struct scsi_cmnd * scsicmd;
scsicmd = (struct scsi_cmnd *) context;
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
if (fibptr == NULL)
@@ -453,8 +454,10 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
/*
* Check that the command was queued to the controller
*/
if (status == -EINPROGRESS)
if (status == -EINPROGRESS) {
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
aac_fib_complete(cmd_fibcontext);
@@ -907,9 +910,10 @@ static void io_callback(void *context, struct fib * fibptr)
u32 cid;
scsicmd = (struct scsi_cmnd *) context;
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
cid = scmd_id(scsicmd);
if (nblank(dprintk(x))) {
u64 lba;
@@ -1151,8 +1155,10 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
/*
* Check that the command was queued to the controller
*/
if (status == -EINPROGRESS)
if (status == -EINPROGRESS) {
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
/*
@@ -1318,8 +1324,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
/*
* Check that the command was queued to the controller
*/
if (status == -EINPROGRESS)
{
if (status == -EINPROGRESS) {
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
@@ -1341,6 +1347,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
struct scsi_cmnd *cmd;
cmd = context;
cmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
smp_processor_id(), jiffies));
@@ -1354,7 +1361,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
else {
struct scsi_device *sdev = cmd->device;
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
u32 cid = sdev_id(sdev);
printk(KERN_WARNING
"synchronize_callback: synchronize failed, status = %d\n",
le32_to_cpu(synchronizereply->status));
@@ -1386,12 +1393,12 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
unsigned long flags;
/*
* Wait for all commands to complete to this specific
* target (block).
* Wait for all outstanding queued commands to complete to this
* specific target (block).
*/
spin_lock_irqsave(&sdev->list_lock, flags);
list_for_each_entry(cmd, &sdev->cmd_list, list)
if (cmd != scsicmd && cmd->serial_number != 0) {
if (cmd != scsicmd && cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
++active;
break;
}
@@ -1434,8 +1441,10 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
/*
* Check that the command was queued to the controller
*/
if (status == -EINPROGRESS)
if (status == -EINPROGRESS) {
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
printk(KERN_WARNING
"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
@@ -1458,7 +1467,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
int ret;
/*
* If the bus, id or lun is out of range, return fail
@@ -1466,13 +1474,14 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* itself.
*/
if (scmd_id(scsicmd) != host->this_id) {
if ((scsicmd->device->channel == CONTAINER_CHANNEL)) {
if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
if ((scmd_channel(scsicmd) == CONTAINER_CHANNEL)) {
if((scmd_id(scsicmd) >= dev->maximum_num_containers) ||
(scsicmd->device->lun != 0)) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
return 0;
}
cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
cid = scmd_id(scsicmd);
/*
* If the target container doesn't exist, it may have
@@ -1548,7 +1557,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
struct inquiry_data inq_data;
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scmd_id(scsicmd)));
memset(&inq_data, 0, sizeof (struct inquiry_data));
inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
@@ -1598,13 +1607,14 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[11] = 0;
cp[12] = 0;
aac_internal_transfer(scsicmd, cp, 0,
min((unsigned int)scsicmd->cmnd[13], sizeof(cp)));
min_t(size_t, scsicmd->cmnd[13], sizeof(cp)));
if (sizeof(cp) < scsicmd->cmnd[13]) {
unsigned int len, offset = sizeof(cp);
memset(cp, 0, offset);
do {
len = min(scsicmd->cmnd[13]-offset, sizeof(cp));
len = min_t(size_t, scsicmd->cmnd[13] - offset,
sizeof(cp));
aac_internal_transfer(scsicmd, cp, offset, len);
} while ((offset += len) < scsicmd->cmnd[13]);
}
@@ -1728,24 +1738,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* containers to /dev/sd device names
*/
spin_unlock_irq(host->host_lock);
if (scsicmd->request->rq_disk)
strlcpy(fsa_dev_ptr[cid].devname,
scsicmd->request->rq_disk->disk_name,
min(sizeof(fsa_dev_ptr[cid].devname),
sizeof(scsicmd->request->rq_disk->disk_name) + 1));
ret = aac_read(scsicmd, cid);
spin_lock_irq(host->host_lock);
return ret;
return aac_read(scsicmd, cid);
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
spin_unlock_irq(host->host_lock);
ret = aac_write(scsicmd, cid);
spin_lock_irq(host->host_lock);
return ret;
return aac_write(scsicmd, cid);
case SYNCHRONIZE_CACHE:
/* Issue FIB to tell Firmware to flush its cache */
@@ -1778,7 +1783,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
return -EFAULT;
if (qd.cnum == -1)
qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
qd.cnum = qd.id;
else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
{
if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
@@ -1890,6 +1895,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
struct scsi_cmnd *scsicmd;
scsicmd = (struct scsi_cmnd *) context;
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
if (fibptr == NULL)
@@ -2068,14 +2074,13 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
u32 timeout;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
if (scsicmd->device->id >= dev->maximum_num_physicals ||
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > 7) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
return 0;
}
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
switch(scsicmd->sc_data_direction){
case DMA_TO_DEVICE:
flag = SRB_DataOut;
@@ -2103,8 +2108,8 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
srbcmd->id = cpu_to_le32(scsicmd->device->id);
srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(scsicmd)));
srbcmd->id = cpu_to_le32(scmd_id(scsicmd));
srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
timeout = scsicmd->timeout_per_command/HZ;
@@ -2161,7 +2166,8 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
/*
* Check that the command was queued to the controller
*/
if (status == -EINPROGRESS){
if (status == -EINPROGRESS) {
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
@@ -2192,8 +2198,6 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
scsicmd->sc_data_direction);
psg->count = cpu_to_le32(sg_count);
byte_count = 0;
for (i = 0; i < sg_count; i++) {
psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
@@ -2249,18 +2253,17 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
scsicmd->sc_data_direction);
psg->count = cpu_to_le32(sg_count);
byte_count = 0;
for (i = 0; i < sg_count; i++) {
int count = sg_dma_len(sg);
addr = sg_dma_address(sg);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
byte_count += sg_dma_len(sg);
psg->sg[i].count = cpu_to_le32(count);
byte_count += count;
sg++;
}
psg->count = cpu_to_le32(sg_count);
/* hba wants the size to be exact */
if(byte_count > scsicmd->request_bufflen){
u32 temp = le32_to_cpu(psg->sg[i-1].count) -
@@ -2275,16 +2278,15 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
}
}
else if(scsicmd->request_bufflen) {
u64 addr;
addr = pci_map_single(dev->pdev,
scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
scsicmd->request_buffer,
scsicmd->request_bufflen,
scsicmd->sc_data_direction);
addr = scsicmd->SCp.dma_handle;
psg->count = cpu_to_le32(1);
psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
scsicmd->SCp.dma_handle = addr;
byte_count = scsicmd->request_bufflen;
}
return byte_count;
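
A note on the min() to min_t() conversions in the INQUIRY path above: the kernel's min() macro insists both arguments have the same type, and sizeof() yields a size_t while cmnd[13] promotes to int, so the old (unsigned int) cast still mixed types on 64-bit builds and drew a compile warning. min_t() names the common type explicitly. Minimal sketch (helper is illustrative):

#include <linux/kernel.h>	/* min(), min_t() */

/* min(want, bufsz) would warn: 'want' promotes to int, sizeof() is a
 * size_t. min_t(size_t, ...) casts both sides to size_t first. */
static size_t bounded_len(unsigned char want, size_t bufsz)
{
	return min_t(size_t, want, bufsz);
}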

View File

@@ -10,6 +10,10 @@
* D E F I N E S
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 2409
# define AAC_DRIVER_BRANCH "-mh1"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
#define AAC_NUM_MGT_FIB 8
@@ -25,7 +29,6 @@
* These macros convert from physical channels to virtual channels
*/
#define CONTAINER_CHANNEL (0)
#define ID_LUN_TO_CONTAINER(id, lun) (id)
#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
@@ -789,6 +792,7 @@ struct fsa_dev_info {
u64 size;
u32 type;
u32 config_waiting_on;
unsigned long config_waiting_stamp;
u16 queue_depth;
u8 config_needed;
u8 valid;
@@ -1771,6 +1775,11 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
}
struct scsi_cmnd;
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
#define AAC_OWNER_ERROR_HANDLER 0x103
#define AAC_OWNER_FIRMWARE 0x106
const char *aac_driverinfo(struct Scsi_Host *);
struct fib *aac_fib_alloc(struct aac_dev *dev);
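
The SCp.phase owner constants added above replace the old cmd->serial_number tests throughout this merge (see the aac_eh_reset and aac_synchronize hunks): each command is stamped with whoever currently owns it, and "outstanding" now means "owned by the firmware". A condensed sketch of the idiom, distilled from those hunks (the helper name is ours, not the driver's):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "aacraid.h"		/* AAC_OWNER_FIRMWARE */

/* Count commands (other than 'self') the firmware still owns. */
static int aac_fw_owned_count(struct scsi_device *sdev, struct scsi_cmnd *self)
{
	struct scsi_cmnd *cmd;
	unsigned long flags;
	int active = 0;

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(cmd, &sdev->cmd_list, list)
		if (cmd != self && cmd->SCp.phase == AAC_OWNER_FIRMWARE)
			++active;
	spin_unlock_irqrestore(&sdev->list_lock, flags);
	return active;
}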

View File

@@ -38,6 +38,8 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -293,6 +295,16 @@ return_fib:
status = 0;
} else {
spin_unlock_irqrestore(&dev->fib_lock, flags);
/* If someone killed the AIF aacraid thread, restart it */
status = !dev->aif_thread;
if (status && dev->queues && dev->fsa_dev) {
/* Be paranoid, be very paranoid! */
kthread_stop(dev->thread);
ssleep(1);
dev->aif_thread = 0;
dev->thread = kthread_run(aac_command_thread, dev, dev->name);
ssleep(1);
}
if (f.wait) {
if(down_interruptible(&fibctx->wait_sem) < 0) {
status = -EINTR;

View File

@@ -767,9 +767,9 @@ void aac_printf(struct aac_dev *dev, u32 val)
if (cp[length] != 0)
cp[length] = 0;
if (level == LOG_AAC_HIGH_ERROR)
printk(KERN_WARNING "aacraid:%s", cp);
printk(KERN_WARNING "%s:%s", dev->name, cp);
else
printk(KERN_INFO "aacraid:%s", cp);
printk(KERN_INFO "%s:%s", dev->name, cp);
}
memset(cp, 0, 256);
}
@@ -784,6 +784,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
* dispatches it to the appropriate routine for handling.
*/
#define AIF_SNIFF_TIMEOUT (30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib;
@@ -837,6 +838,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (device) {
dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
scsi_device_put(device);
}
}
@@ -849,13 +851,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (container != (u32)-1) {
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
}
break;
@@ -872,6 +876,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_needed = ADD;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break;
/*
@@ -884,6 +889,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_needed = DELETE;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break;
/*
@@ -894,11 +900,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on)
if (dev->fsa_dev[container].config_waiting_on &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
break;
dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break;
case AifEnConfigChange:
@@ -913,13 +921,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (container != (u32)-1) {
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
}
break;
@@ -946,6 +956,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange;
dev->fsa_dev[container].config_needed = ADD;
dev->fsa_dev[container].config_waiting_stamp =
jiffies;
}
}
if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
@@ -961,6 +973,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange;
dev->fsa_dev[container].config_needed = DELETE;
dev->fsa_dev[container].config_waiting_stamp =
jiffies;
}
}
break;
@@ -969,8 +983,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
device_config_needed = NOTHING;
for (container = 0; container < dev->maximum_num_containers;
++container) {
if ((dev->fsa_dev[container].config_waiting_on == 0)
&& (dev->fsa_dev[container].config_needed != NOTHING)) {
if ((dev->fsa_dev[container].config_waiting_on == 0) &&
(dev->fsa_dev[container].config_needed != NOTHING) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
device_config_needed =
dev->fsa_dev[container].config_needed;
dev->fsa_dev[container].config_needed = NOTHING;
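
All of the config_waiting_stamp churn in this file is one idiom repeated: stamp jiffies when a container starts waiting on an AIF event, and honour the wait only while it is younger than AIF_SNIFF_TIMEOUT, with time_before() doing the wraparound-safe comparison. Boiled down (the helper is illustrative):

#include <linux/jiffies.h>

#define AIF_SNIFF_TIMEOUT (30*HZ)

/* True while a wait stamped at 'stamp' is still inside its 30s window. */
static inline int config_wait_fresh(unsigned long stamp)
{
	return time_before(jiffies, stamp + AIF_SNIFF_TIMEOUT);
}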

View File

@@ -27,12 +27,6 @@
* Abstract: Linux Driver entry module for Adaptec RAID Array Controller
*/
#define AAC_DRIVER_VERSION "1.1-4"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
#define AAC_DRIVERNAME "aacraid"
#include <linux/compat.h>
#include <linux/blkdev.h>
@@ -62,6 +56,13 @@
#include "aacraid.h"
#define AAC_DRIVER_VERSION "1.1-5"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
#define AAC_DRIVERNAME "aacraid"
#ifdef AAC_DRIVER_BUILD
#define _str(x) #x
#define str(x) _str(x)
@@ -73,7 +74,7 @@
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
"Adaptec Advanced Raid Products, "
"and HP NetRAID-4M SCSI driver");
"HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
@@ -243,6 +244,7 @@ static struct aac_driver_ident aac_drivers[] = {
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
cmd->scsi_done = done;
cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
return (aac_scsi_cmd(cmd) ? FAILED : 0);
}
@@ -471,7 +473,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
__shost_for_each_device(dev, host) {
spin_lock_irqsave(&dev->list_lock, flags);
list_for_each_entry(command, &dev->cmd_list, list) {
if (command->serial_number) {
if ((command != cmd) &&
(command->SCp.phase == AAC_OWNER_FIRMWARE)) {
active++;
break;
}
@@ -569,12 +572,12 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
f = compat_alloc_user_space(sizeof(*f));
ret = 0;
if (clear_user(f, sizeof(*f) != sizeof(*f)))
if (clear_user(f, sizeof(*f)) != sizeof(*f))
ret = -EFAULT;
if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
ret = -EFAULT;
if (!ret)
ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
ret = aac_do_ioctl(dev, cmd, f);
break;
}
@@ -687,6 +690,18 @@ static ssize_t aac_show_serial_number(struct class_device *class_dev,
return len;
}
static ssize_t aac_show_max_channel(struct class_device *class_dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
class_to_shost(class_dev)->max_channel);
}
static ssize_t aac_show_max_id(struct class_device *class_dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
class_to_shost(class_dev)->max_id);
}
static struct class_device_attribute aac_model = {
.attr = {
@@ -730,6 +745,20 @@ static struct class_device_attribute aac_serial_number = {
},
.show = aac_show_serial_number,
};
static struct class_device_attribute aac_max_channel = {
.attr = {
.name = "max_channel",
.mode = S_IRUGO,
},
.show = aac_show_max_channel,
};
static struct class_device_attribute aac_max_id = {
.attr = {
.name = "max_id",
.mode = S_IRUGO,
},
.show = aac_show_max_id,
};
static struct class_device_attribute *aac_attrs[] = {
&aac_model,
@@ -738,6 +767,8 @@ static struct class_device_attribute *aac_attrs[] = {
&aac_monitor_version,
&aac_bios_version,
&aac_serial_number,
&aac_max_channel,
&aac_max_id,
NULL
};
@@ -775,6 +806,7 @@ static struct scsi_host_template aac_driver_template = {
.cmd_per_lun = AAC_NUM_IO_FIB,
#endif
.use_clustering = ENABLE_CLUSTERING,
.emulated = 1,
};
@@ -798,10 +830,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
error = pci_enable_device(pdev);
if (error)
goto out;
error = -ENODEV;
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
goto out;
goto out_disable_pdev;
/*
* If the quirk31 bit is set, the adapter needs adapter
* to driver communication memory to be allocated below 2gig
@@ -809,7 +842,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) ||
pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK))
goto out;
goto out_disable_pdev;
pci_set_master(pdev);
@@ -904,9 +937,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
* physical channels are addressed by their actual physical number+1
*/
if (aac->nondasd_support == 1)
shost->max_channel = aac->maximum_num_channels + 1;
shost->max_channel = aac->maximum_num_channels;
else
shost->max_channel = 1;
shost->max_channel = 0;
aac_get_config_status(aac);
aac_get_containers(aac);
@@ -1020,7 +1053,8 @@ static int __init aac_init(void)
static void __exit aac_exit(void)
{
unregister_chrdev(aac_cfg_major, "aac");
if (aac_cfg_major > -1)
unregister_chrdev(aac_cfg_major, "aac");
pci_unregister_driver(&aac_pci_driver);
}

View File

@@ -183,7 +183,7 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Yield the processor in case we are slow
*/
schedule_timeout_uninterruptible(1);
msleep(1);
}
if (ok != 1) {
/*
@@ -343,7 +343,7 @@ static int aac_rkt_check_health(struct aac_dev *dev)
NULL, NULL, NULL, NULL, NULL);
pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
post, paddr);
if ((buffer[0] == '0') && (buffer[1] == 'x')) {
if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
ret <<= 4;
ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);

View File

@@ -183,7 +183,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Yield the processor in case we are slow
*/
schedule_timeout_uninterruptible(1);
msleep(1);
}
if (ok != 1) {
/*
@@ -342,7 +342,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
NULL, NULL, NULL, NULL, NULL);
pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
post, paddr);
if ((buffer[0] == '0') && (buffer[1] == 'x')) {
if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
ret <<= 4;
ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);

View File

@@ -189,7 +189,7 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
ok = 1;
break;
}
schedule_timeout_uninterruptible(1);
msleep(1);
}
if (ok != 1)

View File

@@ -372,7 +372,7 @@ typedef enum {
AHD_CURRENT_SENSING = 0x40000,
AHD_SCB_CONFIG_USED = 0x80000,/* No SEEPROM but SCB had info. */
AHD_HP_BOARD = 0x100000,
AHD_RESET_POLL_ACTIVE = 0x200000,
AHD_BUS_RESET_ACTIVE = 0x200000,
AHD_UPDATE_PEND_CMDS = 0x400000,
AHD_RUNNING_QOUTFIFO = 0x800000,
AHD_HAD_FIRST_SEL = 0x1000000
@@ -589,7 +589,7 @@ typedef enum {
SCB_PACKETIZED = 0x00800,
SCB_EXPECT_PPR_BUSFREE = 0x01000,
SCB_PKT_SENSE = 0x02000,
SCB_CMDPHASE_ABORT = 0x04000,
SCB_EXTERNAL_RESET = 0x04000,/* Device was reset externally */
SCB_ON_COL_LIST = 0x08000,
SCB_SILENT = 0x10000 /*
* Be quiet about transmission type

View File

@@ -207,7 +207,6 @@ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd,
static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
u_int prev, u_int next, u_int tid);
static void ahd_reset_current_bus(struct ahd_softc *ahd);
static ahd_callback_t ahd_reset_poll;
static ahd_callback_t ahd_stat_timer;
#ifdef AHD_DUMP_SEQ
static void ahd_dumpseq(struct ahd_softc *ahd);
@@ -1054,12 +1053,10 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
* If a target takes us into the command phase
* assume that it has been externally reset and
* has thus lost our previous packetized negotiation
* agreement. Since we have not sent an identify
* message and may not have fully qualified the
* connection, we change our command to TUR, assert
* ATN and ABORT the task when we go to message in
* phase. The OSM will see the REQUEUE_REQUEST
* status and retry the command.
* agreement.
* Revert to async/narrow transfers until we
* can renegotiate with the device and notify
* the OSM about the reset.
*/
scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid);
@@ -1086,31 +1083,15 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
ahd_set_syncrate(ahd, &devinfo, /*period*/0,
/*offset*/0, /*ppr_options*/0,
AHD_TRANS_ACTIVE, /*paused*/TRUE);
ahd_outb(ahd, SCB_CDB_STORE, 0);
ahd_outb(ahd, SCB_CDB_STORE+1, 0);
ahd_outb(ahd, SCB_CDB_STORE+2, 0);
ahd_outb(ahd, SCB_CDB_STORE+3, 0);
ahd_outb(ahd, SCB_CDB_STORE+4, 0);
ahd_outb(ahd, SCB_CDB_STORE+5, 0);
ahd_outb(ahd, SCB_CDB_LEN, 6);
scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
scb->hscb->control |= MK_MESSAGE;
ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
/*
* The lun is 0, regardless of the SCB's lun
* as we have not sent an identify message.
*/
ahd_outb(ahd, SAVED_LUN, 0);
ahd_outb(ahd, SEQ_FLAGS, 0);
ahd_assert_atn(ahd);
scb->flags &= ~SCB_PACKETIZED;
scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
scb->flags |= SCB_EXTERNAL_RESET;
ahd_freeze_devq(ahd, scb);
ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
ahd_freeze_scb(scb);
/* Notify XPT */
ahd_send_async(ahd, devinfo.channel, devinfo.target,
CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
/*
* Allow the sequencer to continue with
* non-pack processing.
@@ -1534,6 +1515,18 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
lqistat1 = ahd_inb(ahd, LQISTAT1);
lqostat0 = ahd_inb(ahd, LQOSTAT0);
busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
/*
* Ignore external resets after a bus reset.
*/
if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE))
return;
/*
* Clear bus reset flag
*/
ahd->flags &= ~AHD_BUS_RESET_ACTIVE;
if ((status0 & (SELDI|SELDO)) != 0) {
u_int simode0;
@@ -2207,22 +2200,6 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
if (sent_msg == MSG_ABORT_TAG)
tag = SCB_GET_TAG(scb);
if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
/*
* This abort is in response to an
* unexpected switch to command phase
* for a packetized connection. Since
* the identify message was never sent,
* "saved lun" is 0. We really want to
* abort only the SCB that encountered
* this error, which could have a different
* lun. The SCB will be retried so the OS
* will see the UA after renegotiating to
* packetized.
*/
tag = SCB_GET_TAG(scb);
saved_lun = scb->hscb->lun;
}
found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
tag, ROLE_INITIATOR,
CAM_REQ_ABORTED);
@@ -7847,6 +7824,17 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
int found;
u_int fifo;
u_int next_fifo;
uint8_t scsiseq;
/*
* Check if the last bus reset is cleared
*/
if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
printf("%s: bus reset still active\n",
ahd_name(ahd));
return 0;
}
ahd->flags |= AHD_BUS_RESET_ACTIVE;
ahd->pending_device = NULL;
@@ -7860,6 +7848,12 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
/* Make sure the sequencer is in a safe location. */
ahd_clear_critical_section(ahd);
/*
* Run our command complete fifos to ensure that we perform
* completion processing on any commands that 'completed'
* before the reset occurred.
*/
ahd_run_qoutfifo(ahd);
#ifdef AHD_TARGET_MODE
if ((ahd->flags & AHD_TARGETROLE) != 0) {
ahd_run_tqinfifo(ahd, /*paused*/TRUE);
@@ -7924,30 +7918,14 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
ahd_clear_fifo(ahd, 1);
/*
* Revert to async/narrow transfers until we renegotiate.
* Reenable selections
*/
ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
for (target = 0; target <= max_scsiid; target++) {
if (ahd->enabled_targets[target] == NULL)
continue;
for (initiator = 0; initiator <= max_scsiid; initiator++) {
struct ahd_devinfo devinfo;
ahd_compile_devinfo(&devinfo, target, initiator,
CAM_LUN_WILDCARD,
'A', ROLE_UNKNOWN);
ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
AHD_TRANS_CUR, /*paused*/TRUE);
ahd_set_syncrate(ahd, &devinfo, /*period*/0,
/*offset*/0, /*ppr_options*/0,
AHD_TRANS_CUR, /*paused*/TRUE);
}
}
#ifdef AHD_TARGET_MODE
max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
/*
* Send an immediate notify ccb to all target mode peripheral
* drivers affected by this action.
@@ -7975,53 +7953,33 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
/* Notify the XPT that a bus reset occurred */
ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
ahd_restart(ahd);
/*
* Freeze the SIMQ until our poller can determine that
* the bus reset has really gone away. We set the initial
* timer to 0 to have the check performed as soon as possible
* from the timer context.
* Revert to async/narrow transfers until we renegotiate.
*/
if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) {
ahd->flags |= AHD_RESET_POLL_ACTIVE;
ahd_freeze_simq(ahd);
ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
for (target = 0; target <= max_scsiid; target++) {
if (ahd->enabled_targets[target] == NULL)
continue;
for (initiator = 0; initiator <= max_scsiid; initiator++) {
struct ahd_devinfo devinfo;
ahd_compile_devinfo(&devinfo, target, initiator,
CAM_LUN_WILDCARD,
'A', ROLE_UNKNOWN);
ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
AHD_TRANS_CUR, /*paused*/TRUE);
ahd_set_syncrate(ahd, &devinfo, /*period*/0,
/*offset*/0, /*ppr_options*/0,
AHD_TRANS_CUR, /*paused*/TRUE);
}
}
ahd_restart(ahd);
return (found);
}
#define AHD_RESET_POLL_US 1000
static void
ahd_reset_poll(void *arg)
{
struct ahd_softc *ahd = arg;
u_int scsiseq1;
u_long s;
ahd_lock(ahd, &s);
ahd_pause(ahd);
ahd_update_modes(ahd);
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) {
ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US,
ahd_reset_poll, ahd);
ahd_unpause(ahd);
ahd_unlock(ahd, &s);
return;
}
/* Reset is now low. Complete chip reinitialization. */
ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP));
ahd_unpause(ahd);
ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
ahd_unlock(ahd, &s);
ahd_release_simq(ahd);
}
/**************************** Statistics Processing ***************************/
static void
ahd_stat_timer(void *arg)
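
Summarizing the aic79xx rework above (a simplified paraphrase, not the driver's literal code): the AHD_RESET_POLL_ACTIVE timer that used to poll for the reset line dropping is gone. Instead, AHD_BUS_RESET_ACTIVE is set when a reset is initiated, the first resulting SCSIRSTI interrupt is swallowed, and the flag is cleared so any later SCSIRSTI is treated as a genuine external reset:

static int start_bus_reset(struct ahd_softc *ahd)
{
	if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
		printf("%s: bus reset still active\n", ahd_name(ahd));
		return 0;			/* one reset at a time */
	}
	ahd->flags |= AHD_BUS_RESET_ACTIVE;
	/* ... assert SCSI RST, re-enable selections, revert to
	 * async/narrow until renegotiation, notify the XPT ... */
	return 1;
}

static void scsi_interrupt(struct ahd_softc *ahd, u_int status)
{
	/* Our own reset echoes back as SCSIRSTI once: ignore it. */
	if ((status & SCSIRSTI) && (ahd->flags & AHD_BUS_RESET_ACTIVE))
		return;
	ahd->flags &= ~AHD_BUS_RESET_ACTIVE;
	/* ... a SCSIRSTI reaching here is a real external reset ... */
}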

View File

@@ -782,6 +782,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
{
struct ahd_softc *ahd;
int found;
unsigned long flags;
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#ifdef AHD_DEBUG
@@ -789,8 +790,11 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
printf("%s: Bus reset called for cmd %p\n",
ahd_name(ahd), cmd);
#endif
ahd_lock(ahd, &flags);
found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A',
/*initiate reset*/TRUE);
ahd_unlock(ahd, &flags);
if (bootverbose)
printf("%s: SCSI bus reset delivered. "

View File

@@ -168,7 +168,7 @@ static int release_event_pool(struct event_pool *pool,
++in_use;
if (pool->events[i].ext_list) {
dma_free_coherent(hostdata->dev,
SG_ALL * sizeof(struct memory_descriptor),
SG_ALL * sizeof(struct srp_direct_buf),
pool->events[i].ext_list,
pool->events[i].ext_list_token);
}
@@ -284,40 +284,37 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
struct srp_cmd *srp_cmd,
int numbuf)
{
u8 fmt;
if (numbuf == 0)
return;
if (numbuf == 1) {
if (numbuf == 1)
fmt = SRP_DATA_DESC_DIRECT;
else {
fmt = SRP_DATA_DESC_INDIRECT;
numbuf = min(numbuf, MAX_INDIRECT_BUFS);
if (cmd->sc_data_direction == DMA_TO_DEVICE)
srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
else
srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
} else {
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
srp_cmd->data_out_count =
numbuf < MAX_INDIRECT_BUFS ?
numbuf: MAX_INDIRECT_BUFS;
} else {
srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
srp_cmd->data_in_count =
numbuf < MAX_INDIRECT_BUFS ?
numbuf: MAX_INDIRECT_BUFS;
}
srp_cmd->data_out_desc_cnt = numbuf;
else
srp_cmd->data_in_desc_cnt = numbuf;
}
if (cmd->sc_data_direction == DMA_TO_DEVICE)
srp_cmd->buf_fmt = fmt << 4;
else
srp_cmd->buf_fmt = fmt;
}
static void unmap_sg_list(int num_entries,
static void unmap_sg_list(int num_entries,
struct device *dev,
struct memory_descriptor *md)
{
struct srp_direct_buf *md)
{
int i;
for (i = 0; i < num_entries; ++i) {
dma_unmap_single(dev,
md[i].virtual_address,
md[i].length, DMA_BIDIRECTIONAL);
}
for (i = 0; i < num_entries; ++i)
dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}
/**
@@ -330,23 +327,26 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
struct srp_event_struct *evt_struct,
struct device *dev)
{
if ((cmd->data_out_format == SRP_NO_BUFFER) &&
(cmd->data_in_format == SRP_NO_BUFFER))
u8 out_fmt, in_fmt;
out_fmt = cmd->buf_fmt >> 4;
in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
return;
else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) ||
(cmd->data_in_format == SRP_DIRECT_BUFFER)) {
struct memory_descriptor *data =
(struct memory_descriptor *)cmd->additional_data;
dma_unmap_single(dev, data->virtual_address, data->length,
DMA_BIDIRECTIONAL);
else if (out_fmt == SRP_DATA_DESC_DIRECT ||
in_fmt == SRP_DATA_DESC_DIRECT) {
struct srp_direct_buf *data =
(struct srp_direct_buf *) cmd->add_data;
dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
} else {
struct indirect_descriptor *indirect =
(struct indirect_descriptor *)cmd->additional_data;
int num_mapped = indirect->head.length /
sizeof(indirect->list[0]);
struct srp_indirect_buf *indirect =
(struct srp_indirect_buf *) cmd->add_data;
int num_mapped = indirect->table_desc.len /
sizeof(struct srp_direct_buf);
if (num_mapped <= MAX_INDIRECT_BUFS) {
unmap_sg_list(num_mapped, dev, &indirect->list[0]);
unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
return;
}
@@ -356,17 +356,17 @@ static int map_sg_list(int num_entries,
static int map_sg_list(int num_entries,
struct scatterlist *sg,
struct memory_descriptor *md)
struct srp_direct_buf *md)
{
int i;
u64 total_length = 0;
for (i = 0; i < num_entries; ++i) {
struct memory_descriptor *descr = md + i;
struct srp_direct_buf *descr = md + i;
struct scatterlist *sg_entry = &sg[i];
descr->virtual_address = sg_dma_address(sg_entry);
descr->length = sg_dma_len(sg_entry);
descr->memory_handle = 0;
descr->va = sg_dma_address(sg_entry);
descr->len = sg_dma_len(sg_entry);
descr->key = 0;
total_length += sg_dma_len(sg_entry);
}
return total_length;
@@ -389,10 +389,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
int sg_mapped;
u64 total_length = 0;
struct scatterlist *sg = cmd->request_buffer;
struct memory_descriptor *data =
(struct memory_descriptor *)srp_cmd->additional_data;
struct indirect_descriptor *indirect =
(struct indirect_descriptor *)data;
struct srp_direct_buf *data =
(struct srp_direct_buf *) srp_cmd->add_data;
struct srp_indirect_buf *indirect =
(struct srp_indirect_buf *) data;
sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
@@ -403,9 +403,9 @@ static int map_sg_data(struct scsi_cmnd *cmd,
/* special case; we can use a single direct descriptor */
if (sg_mapped == 1) {
data->virtual_address = sg_dma_address(&sg[0]);
data->length = sg_dma_len(&sg[0]);
data->memory_handle = 0;
data->va = sg_dma_address(&sg[0]);
data->len = sg_dma_len(&sg[0]);
data->key = 0;
return 1;
}
@@ -416,25 +416,26 @@ static int map_sg_data(struct scsi_cmnd *cmd,
return 0;
}
indirect->head.virtual_address = 0;
indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
indirect->head.memory_handle = 0;
indirect->table_desc.va = 0;
indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
indirect->table_desc.key = 0;
if (sg_mapped <= MAX_INDIRECT_BUFS) {
total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
indirect->total_length = total_length;
total_length = map_sg_list(sg_mapped, sg,
&indirect->desc_list[0]);
indirect->len = total_length;
return 1;
}
/* get indirect table */
if (!evt_struct->ext_list) {
evt_struct->ext_list =(struct memory_descriptor*)
evt_struct->ext_list = (struct srp_direct_buf *)
dma_alloc_coherent(dev,
SG_ALL * sizeof(struct memory_descriptor),
&evt_struct->ext_list_token, 0);
SG_ALL * sizeof(struct srp_direct_buf),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
printk(KERN_ERR
"ibmvscsi: Can't allocate memory for indirect table\n");
printk(KERN_ERR
"ibmvscsi: Can't allocate memory for indirect table\n");
return 0;
}
@@ -442,11 +443,11 @@ static int map_sg_data(struct scsi_cmnd *cmd,
total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
indirect->total_length = total_length;
indirect->head.virtual_address = evt_struct->ext_list_token;
indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
memcpy(indirect->list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
indirect->len = total_length;
indirect->table_desc.va = evt_struct->ext_list_token;
indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
memcpy(indirect->desc_list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
return 1;
}
@@ -463,20 +464,20 @@ static int map_sg_data(struct scsi_cmnd *cmd,
static int map_single_data(struct scsi_cmnd *cmd,
struct srp_cmd *srp_cmd, struct device *dev)
{
struct memory_descriptor *data =
(struct memory_descriptor *)srp_cmd->additional_data;
struct srp_direct_buf *data =
(struct srp_direct_buf *) srp_cmd->add_data;
data->virtual_address =
data->va =
dma_map_single(dev, cmd->request_buffer,
cmd->request_bufflen,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(data->virtual_address)) {
if (dma_mapping_error(data->va)) {
printk(KERN_ERR
"ibmvscsi: Unable to map request_buffer for command!\n");
return 0;
}
data->length = cmd->request_bufflen;
data->memory_handle = 0;
data->len = cmd->request_bufflen;
data->key = 0;
set_srp_direction(cmd, srp_cmd, 1);
@@ -548,7 +549,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
/* Copy the IU into the transfer area */
*evt_struct->xfer_iu = evt_struct->iu;
evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;
evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
/* Add this to the sent list. We need to do this
* before we actually send
@@ -586,27 +587,27 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
struct scsi_cmnd *cmnd = evt_struct->cmnd;
if (unlikely(rsp->type != SRP_RSP_TYPE)) {
if (unlikely(rsp->opcode != SRP_RSP)) {
if (printk_ratelimit())
printk(KERN_WARNING
"ibmvscsi: bad SRP RSP type %d\n",
rsp->type);
rsp->opcode);
}
if (cmnd) {
cmnd->result = rsp->status;
if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
memcpy(cmnd->sense_buffer,
rsp->sense_and_response_data,
rsp->sense_data_list_length);
rsp->data,
rsp->sense_data_len);
unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
evt_struct->hostdata->dev);
if (rsp->doover)
cmnd->resid = rsp->data_out_residual_count;
else if (rsp->diover)
cmnd->resid = rsp->data_in_residual_count;
if (rsp->flags & SRP_RSP_FLAG_DOOVER)
cmnd->resid = rsp->data_out_res_cnt;
else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
cmnd->resid = rsp->data_in_res_cnt;
}
if (evt_struct->cmnd_done)
@@ -633,10 +634,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
{
struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct;
struct indirect_descriptor *indirect;
struct srp_indirect_buf *indirect;
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
u16 lun = lun_from_dev(cmnd->device);
u8 out_fmt, in_fmt;
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct)
@@ -644,8 +646,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
/* Set up the actual SRP IU */
srp_cmd = &evt_struct->iu.srp.cmd;
memset(srp_cmd, 0x00, sizeof(*srp_cmd));
srp_cmd->type = SRP_CMD_TYPE;
memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
srp_cmd->lun = ((u64) lun) << 48;
@@ -664,13 +666,15 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
evt_struct->cmnd_done = done;
/* Fix up dma address of the buffer itself */
indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
(srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
(indirect->head.virtual_address == 0)) {
indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
offsetof(struct srp_cmd, additional_data) +
offsetof(struct indirect_descriptor, list);
indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
out_fmt = srp_cmd->buf_fmt >> 4;
in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
out_fmt == SRP_DATA_DESC_INDIRECT) &&
indirect->table_desc.va == 0) {
indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
offsetof(struct srp_cmd, add_data) +
offsetof(struct srp_indirect_buf, desc_list);
}
return ibmvscsi_send_srp_event(evt_struct, hostdata);
@@ -780,10 +784,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
static void login_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
switch (evt_struct->xfer_iu->srp.generic.type) {
case SRP_LOGIN_RSP_TYPE: /* it worked! */
switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
case SRP_LOGIN_RSP: /* it worked! */
break;
case SRP_LOGIN_REJ_TYPE: /* refused! */
case SRP_LOGIN_REJ: /* refused! */
printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
evt_struct->xfer_iu->srp.login_rej.reason);
/* Login failed. */
@@ -792,7 +796,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
default:
printk(KERN_ERR
"ibmvscsi: Invalid login response typecode 0x%02x!\n",
evt_struct->xfer_iu->srp.generic.type);
evt_struct->xfer_iu->srp.login_rsp.opcode);
/* Login failed. */
atomic_set(&hostdata->request_limit, -1);
return;
@@ -800,17 +804,17 @@ static void login_rsp(struct srp_event_struct *evt_struct)
printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta >
if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
(max_requests - 2))
evt_struct->xfer_iu->srp.login_rsp.request_limit_delta =
evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
max_requests - 2;
/* Now we know what the real request-limit is */
atomic_set(&hostdata->request_limit,
evt_struct->xfer_iu->srp.login_rsp.request_limit_delta);
evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
hostdata->host->can_queue =
evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2;
evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
if (hostdata->host->can_queue < 1) {
printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
@@ -849,18 +853,19 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
login = &evt_struct->iu.srp.login_req;
memset(login, 0x00, sizeof(struct srp_login_req));
login->type = SRP_LOGIN_REQ_TYPE;
login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
login->required_buffer_formats = 0x0006;
login->opcode = SRP_LOGIN_REQ;
login->req_it_iu_len = sizeof(union srp_iu);
login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 1, since this is negotiated in
* the login request we are just sending
*/
atomic_set(&hostdata->request_limit, 1);
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk("ibmvscsic: sent SRP login\n");
return rc;
};
@@ -928,13 +933,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
/* Set up an abort SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
tsk_mgmt->opcode = SRP_TSK_MGMT;
tsk_mgmt->lun = ((u64) lun) << 48;
tsk_mgmt->task_mgmt_flags = 0x01; /* ABORT TASK */
tsk_mgmt->managed_task_tag = (u64) found_evt;
tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
tsk_mgmt->task_tag = (u64) found_evt;
printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
tsk_mgmt->lun, tsk_mgmt->task_tag);
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
@@ -948,25 +953,25 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
wait_for_completion(&evt->comp);
/* make sure we got a good response */
if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
if (printk_ratelimit())
printk(KERN_WARNING
"ibmvscsi: abort bad SRP RSP type %d\n",
srp_rsp.srp.generic.type);
srp_rsp.srp.rsp.opcode);
return FAILED;
}
if (srp_rsp.srp.rsp.rspvalid)
rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
rsp_rc = *((int *)srp_rsp.srp.rsp.data);
else
rsp_rc = srp_rsp.srp.rsp.status;
if (rsp_rc) {
if (printk_ratelimit())
printk(KERN_WARNING
"ibmvscsi: abort code %d for task tag 0x%lx\n",
"ibmvscsi: abort code %d for task tag 0x%lx\n",
rsp_rc,
tsk_mgmt->managed_task_tag);
tsk_mgmt->task_tag);
return FAILED;
}
@@ -987,13 +992,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_INFO
"ibmvscsi: aborted task tag 0x%lx completed\n",
tsk_mgmt->managed_task_tag);
tsk_mgmt->task_tag);
return SUCCESS;
}
printk(KERN_INFO
"ibmvscsi: successfully aborted task tag 0x%lx\n",
tsk_mgmt->managed_task_tag);
tsk_mgmt->task_tag);
cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list);
@@ -1040,9 +1045,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* Set up a lun reset SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
tsk_mgmt->opcode = SRP_TSK_MGMT;
tsk_mgmt->lun = ((u64) lun) << 48;
tsk_mgmt->task_mgmt_flags = 0x08; /* LUN RESET */
tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
tsk_mgmt->lun);
@@ -1059,16 +1064,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
wait_for_completion(&evt->comp);
/* make sure we got a good response */
if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
if (printk_ratelimit())
printk(KERN_WARNING
"ibmvscsi: reset bad SRP RSP type %d\n",
srp_rsp.srp.generic.type);
srp_rsp.srp.rsp.opcode);
return FAILED;
}
if (srp_rsp.srp.rsp.rspvalid)
rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
rsp_rc = *((int *)srp_rsp.srp.rsp.data);
else
rsp_rc = srp_rsp.srp.rsp.status;
@ -1076,8 +1081,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
if (printk_ratelimit())
printk(KERN_WARNING
"ibmvscsi: reset code %d for task tag 0x%lx\n",
rsp_rc,
tsk_mgmt->managed_task_tag);
rsp_rc, tsk_mgmt->task_tag);
return FAILED;
}
@ -1179,6 +1183,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
/* We need to re-setup the interpartition connection */
printk(KERN_INFO
"ibmvscsi: Re-enabling adapter!\n");
atomic_set(&hostdata->request_limit, -1);
purge_requests(hostdata, DID_REQUEUE);
if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
hostdata) == 0)
@ -1226,7 +1231,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
if (crq->format == VIOSRP_SRP_FORMAT)
atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
&hostdata->request_limit);
if (evt_struct->done)

View File

@ -68,7 +68,7 @@ struct srp_event_struct {
void (*cmnd_done) (struct scsi_cmnd *);
struct completion comp;
union viosrp_iu *sync_srp;
struct memory_descriptor *ext_list;
struct srp_direct_buf *ext_list;
dma_addr_t ext_list_token;
};

View File

@ -34,7 +34,6 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include "ibmvscsi.h"
#include "srp.h"
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;

View File

@ -1,227 +0,0 @@
/*****************************************************************************/
/* srp.h -- SCSI RDMA Protocol definitions */
/* */
/* Written By: Colin Devilbis, IBM Corporation */
/* */
/* Copyright (C) 2003 IBM Corporation */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* */
/* */
/* This file contains structures and definitions for the SCSI RDMA Protocol */
/* (SRP) as defined in the T10 standard available at www.t10.org. This */
/* file was based on the 16a version of the standard */
/* */
/*****************************************************************************/
#ifndef SRP_H
#define SRP_H
#define SRP_VERSION "16.a"
#define PACKED __attribute__((packed))
enum srp_types {
SRP_LOGIN_REQ_TYPE = 0x00,
SRP_LOGIN_RSP_TYPE = 0xC0,
SRP_LOGIN_REJ_TYPE = 0xC2,
SRP_I_LOGOUT_TYPE = 0x03,
SRP_T_LOGOUT_TYPE = 0x80,
SRP_TSK_MGMT_TYPE = 0x01,
SRP_CMD_TYPE = 0x02,
SRP_RSP_TYPE = 0xC1,
SRP_CRED_REQ_TYPE = 0x81,
SRP_CRED_RSP_TYPE = 0x41,
SRP_AER_REQ_TYPE = 0x82,
SRP_AER_RSP_TYPE = 0x42
};
enum srp_descriptor_formats {
SRP_NO_BUFFER = 0x00,
SRP_DIRECT_BUFFER = 0x01,
SRP_INDIRECT_BUFFER = 0x02
};
struct memory_descriptor {
u64 virtual_address;
u32 memory_handle;
u32 length;
};
struct indirect_descriptor {
struct memory_descriptor head;
u32 total_length;
struct memory_descriptor list[1] PACKED;
};
struct srp_generic {
u8 type;
u8 reserved1[7];
u64 tag;
};
struct srp_login_req {
u8 type;
u8 reserved1[7];
u64 tag;
u32 max_requested_initiator_to_target_iulen;
u32 reserved2;
u16 required_buffer_formats;
u8 reserved3:6;
u8 multi_channel_action:2;
u8 reserved4;
u32 reserved5;
u8 initiator_port_identifier[16];
u8 target_port_identifier[16];
};
struct srp_login_rsp {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
u32 max_initiator_to_target_iulen;
u32 max_target_to_initiator_iulen;
u16 supported_buffer_formats;
u8 reserved2:6;
u8 multi_channel_result:2;
u8 reserved3;
u8 reserved4[24];
};
struct srp_login_rej {
u8 type;
u8 reserved1[3];
u32 reason;
u64 tag;
u64 reserved2;
u16 supported_buffer_formats;
u8 reserved3[6];
};
struct srp_i_logout {
u8 type;
u8 reserved1[7];
u64 tag;
};
struct srp_t_logout {
u8 type;
u8 reserved1[3];
u32 reason;
u64 tag;
};
struct srp_tsk_mgmt {
u8 type;
u8 reserved1[7];
u64 tag;
u32 reserved2;
u64 lun PACKED;
u8 reserved3;
u8 reserved4;
u8 task_mgmt_flags;
u8 reserved5;
u64 managed_task_tag;
u64 reserved6;
};
struct srp_cmd {
u8 type;
u32 reserved1 PACKED;
u8 data_out_format:4;
u8 data_in_format:4;
u8 data_out_count;
u8 data_in_count;
u64 tag;
u32 reserved2;
u64 lun PACKED;
u8 reserved3;
u8 reserved4:5;
u8 task_attribute:3;
u8 reserved5;
u8 additional_cdb_len;
u8 cdb[16];
u8 additional_data[0x100 - 0x30];
};
struct srp_rsp {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
u16 reserved2;
u8 reserved3:2;
u8 diunder:1;
u8 diover:1;
u8 dounder:1;
u8 doover:1;
u8 snsvalid:1;
u8 rspvalid:1;
u8 status;
u32 data_in_residual_count;
u32 data_out_residual_count;
u32 sense_data_list_length;
u32 response_data_list_length;
u8 sense_and_response_data[18];
};
struct srp_cred_req {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
};
struct srp_cred_rsp {
u8 type;
u8 reserved1[7];
u64 tag;
};
struct srp_aer_req {
u8 type;
u8 reserved1[3];
u32 request_limit_delta;
u64 tag;
u32 reserved2;
u64 lun;
u32 sense_data_list_length;
u32 reserved3;
u8 sense_data[20];
};
struct srp_aer_rsp {
u8 type;
u8 reserved1[7];
u64 tag;
};
union srp_iu {
struct srp_generic generic;
struct srp_login_req login_req;
struct srp_login_rsp login_rsp;
struct srp_login_rej login_rej;
struct srp_i_logout i_logout;
struct srp_t_logout t_logout;
struct srp_tsk_mgmt tsk_mgmt;
struct srp_cmd cmd;
struct srp_rsp rsp;
struct srp_cred_req cred_req;
struct srp_cred_rsp cred_rsp;
struct srp_aer_req aer_req;
struct srp_aer_rsp aer_rsp;
};
#endif
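The definitions deleted here are superseded by the generic <scsi/srp.h>
header. For orientation, the descriptor that replaces struct
memory_descriptor looks roughly like this (a sketch of the 2.6.16-era
layout; the header itself is authoritative):

/* Needs <linux/types.h> for the __be64/__be32 endian-annotated types. */
struct srp_direct_buf {
        __be64  va;     /* was memory_descriptor.virtual_address */
        __be32  key;    /* was memory_descriptor.memory_handle */
        __be32  len;    /* was memory_descriptor.length */
};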

View File

@ -33,7 +33,22 @@
/*****************************************************************************/
#ifndef VIOSRP_H
#define VIOSRP_H
#include "srp.h"
#include <scsi/srp.h>
#define SRP_VERSION "16.a"
#define SRP_MAX_IU_LEN 256
union srp_iu {
struct srp_login_req login_req;
struct srp_login_rsp login_rsp;
struct srp_login_rej login_rej;
struct srp_i_logout i_logout;
struct srp_t_logout t_logout;
struct srp_tsk_mgmt tsk_mgmt;
struct srp_cmd cmd;
struct srp_rsp rsp;
u8 reserved[SRP_MAX_IU_LEN];
};
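Because the reserved member pads the union out to SRP_MAX_IU_LEN,
sizeof(union srp_iu) is a constant 256 bytes, so IU buffers can be pooled
without per-message-type bookkeeping. A hedged sketch (alloc_iu_pool is a
hypothetical helper, not part of this driver):

static union srp_iu *alloc_iu_pool(int n)
{
        /* Every slot is exactly SRP_MAX_IU_LEN bytes. */
        return kcalloc(n, sizeof(union srp_iu), GFP_KERNEL);
}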
enum viosrp_crq_formats {
VIOSRP_SRP_FORMAT = 0x01,

View File

@ -164,29 +164,6 @@ MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when init
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
static const char *ipr_gpdd_dev_end_states[] = {
"Command complete",
"Terminated by host",
"Terminated by device reset",
"Terminated by bus reset",
"Unknown",
"Command not started"
};
static const char *ipr_gpdd_dev_bus_phases[] = {
"Bus free",
"Arbitration",
"Selection",
"Message out",
"Command",
"Message in",
"Data out",
"Data in",
"Status",
"Reselection",
"Unknown"
};
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
@ -869,8 +846,8 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
if (res->sdev) {
res->sdev->hostdata = NULL;
res->del_from_ml = 1;
res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
if (ioa_cfg->allow_ml_add_del)
schedule_work(&ioa_cfg->work_q);
} else
@ -1356,8 +1333,8 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
return;
if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
"%s\n", ipr_error_table[error_index].error);
ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
"%s\n", ipr_error_table[error_index].error);
} else {
dev_err(&ioa_cfg->pdev->dev, "%s\n",
ipr_error_table[error_index].error);
@ -2107,7 +2084,6 @@ restart:
did_work = 1;
sdev = res->sdev;
if (!scsi_device_get(sdev)) {
res->sdev = NULL;
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev);
@ -2124,6 +2100,7 @@ restart:
bus = res->cfgte.res_addr.bus;
target = res->cfgte.res_addr.target;
lun = res->cfgte.res_addr.lun;
res->add_to_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_add_device(ioa_cfg->host, bus, target, lun);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@ -3214,7 +3191,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->timeout = IPR_VSET_RW_TIMEOUT;
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
sdev->allow_restart = 1;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
}
@ -3303,6 +3280,44 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
return rc;
}
/**
* ipr_device_reset - Reset the device
* @ioa_cfg: ioa config struct
* @res: resource entry struct
*
* This function issues a device reset to the affected device.
* If the device is a SCSI device, a LUN reset will be sent
* to the device first. If that does not work, a target reset
* will be sent.
*
* Return value:
* 0 on success / non-zero on failure
**/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_resource_entry *res)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc;
ENTER;
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioarcb = &ipr_cmd->ioarcb;
cmd_pkt = &ioarcb->cmd_pkt;
ioarcb->res_handle = res->cfgte.res_handle;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
/**
* ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct
@ -3319,8 +3334,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc;
int rc;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@ -3347,25 +3361,12 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
}
res->resetting_device = 1;
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
return (rc ? FAILED : SUCCESS);
}
static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
@ -3440,7 +3441,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
return;
}
ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ipr_cmd->sibling = reset_cmd;
reset_cmd->sibling = ipr_cmd;
@ -3504,7 +3505,8 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
ipr_cmd->u.sdev = scsi_cmd->device;
ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
scsi_cmd->cmnd[0]);
ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
@ -3815,8 +3817,8 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
scsi_cmd->result |= (DID_ERROR << 16);
ipr_sdev_err(scsi_cmd->device,
"Request Sense failed with IOASC: 0x%08X\n", ioasc);
scmd_printk(KERN_ERR, scsi_cmd,
"Request Sense failed with IOASC: 0x%08X\n", ioasc);
} else {
memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
@ -3938,6 +3940,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
* ipr_dump_ioasa - Dump contents of IOASA
* @ioa_cfg: ioa config struct
* @ipr_cmd: ipr command struct
* @res: resource entry struct
*
* This function is invoked by the interrupt handler when ops
* fail. It will log the IOASA if appropriate. Only called
@ -3947,7 +3950,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
* none
**/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd)
struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
int i;
u16 data_len;
@ -3975,16 +3978,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
return;
}
ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
ipr_error_table[error_index].error);
if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
(ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
ipr_sdev_err(ipr_cmd->scsi_cmd->device,
"Device End state: %s Phase: %s\n",
ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
}
ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
data_len = sizeof(struct ipr_ioasa);
@ -4141,7 +4135,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
if (ipr_is_gscsi(res))
ipr_dump_ioasa(ioa_cfg, ipr_cmd);
ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
else
ipr_gen_sense(ipr_cmd);
@ -4540,7 +4534,7 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
ipr_cmd->job_step = ipr_ioa_reset_done;
list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
if (!ipr_is_scsi_disk(res))
continue;
ipr_cmd->u.res = res;
@ -4980,7 +4974,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
list_for_each_entry_safe(res, temp, &old_res, queue) {
if (res->sdev) {
res->del_from_ml = 1;
res->sdev->hostdata = NULL;
res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
list_move_tail(&res->queue, &ioa_cfg->used_res_q);
} else {
list_move_tail(&res->queue, &ioa_cfg->free_res_q);

View File

@ -36,8 +36,8 @@
/*
* Literals
*/
#define IPR_DRIVER_VERSION "2.1.2"
#define IPR_DRIVER_DATE "(February 8, 2006)"
#define IPR_DRIVER_VERSION "2.1.3"
#define IPR_DRIVER_DATE "(March 29, 2006)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@ -133,6 +133,7 @@
#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8))
#define IPR_IOA_RES_HANDLE 0xffffffff
#define IPR_INVALID_RES_HANDLE 0
#define IPR_IOA_RES_ADDR 0x00ffffff
/*
@ -1191,30 +1192,17 @@ struct ipr_ucode_image_header {
*/
#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__)
#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
#define ipr_crit(...) printk(KERN_CRIT IPR_NAME ": "__VA_ARGS__)
#define ipr_warn(...) printk(KERN_WARNING IPR_NAME": "__VA_ARGS__)
#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
#define ipr_sdev_printk(level, sdev, fmt, args...) \
sdev_printk(level, sdev, fmt, ## args)
#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
(ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)
#define ipr_sdev_err(sdev, fmt, ...) \
ipr_sdev_printk(KERN_ERR, sdev, fmt, ##__VA_ARGS__)
#define ipr_sdev_info(sdev, fmt, ...) \
ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__)
#define ipr_sdev_dbg(sdev, fmt, ...) \
IPR_DBG_CMD(ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__))
#define ipr_res_printk(level, ioa_cfg, res, fmt, ...) \
printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, ioa_cfg->host->host_no, \
res.bus, res.target, res.lun, ##__VA_ARGS__)
#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
#define ipr_res_err(ioa_cfg, res, fmt, ...) \
ipr_res_printk(KERN_ERR, ioa_cfg, res, fmt, ##__VA_ARGS__)
#define ipr_res_dbg(ioa_cfg, res, fmt, ...) \
IPR_DBG_CMD(ipr_res_printk(KERN_INFO, ioa_cfg, res, fmt, ##__VA_ARGS__))
ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__)
#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \
{ \
@ -1303,6 +1291,22 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
return 0;
}
/**
* ipr_is_scsi_disk - Determine if a resource is a SCSI disk
* @res: resource entry struct
*
* Return value:
* 1 if SCSI disk / 0 if not SCSI disk
**/
static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
{
if (ipr_is_af_dasd_device(res) ||
(ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data)))
return 1;
else
return 0;
}
/**
* ipr_is_naca_model - Determine if a resource is using NACA queueing model
* @res: resource entry struct

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -565,7 +565,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
/*
* If SCSI-2 or lower, store the LUN value in cmnd.
*/
if (cmd->device->scsi_level <= SCSI_2) {
if (cmd->device->scsi_level <= SCSI_2 &&
cmd->device->scsi_level != SCSI_UNKNOWN) {
cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
(cmd->device->lun << 5 & 0xe0);
}
@ -1243,7 +1244,7 @@ static int __init init_scsi(void)
if (error)
goto cleanup_sysctl;
for_each_cpu(i)
for_each_possible_cpu(i)
INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
printk(KERN_NOTICE "SCSI subsystem initialized\n");

View File

@ -132,7 +132,9 @@ static struct {
{"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */
{"CNSI", "G7324", NULL, BLIST_SPARSELUN}, /* Chaparral G7324 RAID */
{"CNSi", "G8324", NULL, BLIST_SPARSELUN}, /* Chaparral G8324 RAID */
{"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN},
{"COMPAQ", "ARRAY CONTROLLER", NULL, BLIST_SPARSELUN | BLIST_LARGELUN |
BLIST_MAX_512 | BLIST_REPORTLUN2}, /* Compaq RA4x00 */
{"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN | BLIST_MAX_512}, /* Compaq RA4x00 */
{"COMPAQ", "CR3500", NULL, BLIST_FORCELUN},
{"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
{"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},

View File

@ -157,180 +157,6 @@ int scsi_set_medium_removal(struct scsi_device *sdev, char state)
}
EXPORT_SYMBOL(scsi_set_medium_removal);
/*
* This interface is deprecated - users should use the scsi generic (sg)
* interface instead, as this is a more flexible approach to performing
* generic SCSI commands on a device.
*
* The structure that we are passed should look like:
*
* struct sdata {
* unsigned int inlen; [i] Length of data to be written to device
* unsigned int outlen; [i] Length of data to be read from device
* unsigned char cmd[x]; [i] SCSI command (6 <= x <= 12).
* [o] Data read from device starts here.
* [o] On error, sense buffer starts here.
* unsigned char wdata[y]; [i] Data written to device starts here.
* };
* Notes:
* - The SCSI command length is determined by examining the 1st byte
* of the given command. There is no way to override this.
* - Data transfers are limited to PAGE_SIZE (4K on i386, 8K on alpha).
* - The length (x + y) must be at least OMAX_SB_LEN bytes long to
* accommodate the sense buffer when an error occurs.
* The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
* old code will not be surprised.
* - If a Unix error occurs (e.g. ENOMEM) then the user will receive
* a negative return and the Unix error code in 'errno'.
* If the SCSI command succeeds then 0 is returned.
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
* See the drivers/scsi/scsi.h file for more information on this.
*
*/
#define OMAX_SB_LEN 16 /* Old sense buffer length */
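For context, a sketch of how userspace drove this deprecated ioctl under
the sdata layout described above (illustrative only: error handling is
elided, and cmd[] is oversized to OMAX_SB_LEN so sense data fits on
failure):

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>     /* SCSI_IOCTL_SEND_COMMAND */

static int test_unit_ready(int fd)
{
        struct {
                unsigned int  inlen;    /* data written to the device */
                unsigned int  outlen;   /* data read back from it */
                unsigned char cmd[16];  /* CDB in; data/sense out */
        } sdata;

        memset(&sdata, 0, sizeof(sdata));
        sdata.cmd[0] = 0x00;            /* TEST UNIT READY, 6-byte CDB */
        return ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &sdata);
}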
int scsi_ioctl_send_command(struct scsi_device *sdev,
struct scsi_ioctl_command __user *sic)
{
char *buf;
unsigned char cmd[MAX_COMMAND_SIZE];
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
char __user *cmd_in;
unsigned char opcode;
unsigned int inlen, outlen, cmdlen;
unsigned int needed, buf_needed;
int timeout, retries, result;
int data_direction;
gfp_t gfp_mask = GFP_KERNEL;
if (!sic)
return -EINVAL;
if (sdev->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
/*
* Verify that we can read at least this much.
*/
if (!access_ok(VERIFY_READ, sic, sizeof(Scsi_Ioctl_Command)))
return -EFAULT;
if(__get_user(inlen, &sic->inlen))
return -EFAULT;
if(__get_user(outlen, &sic->outlen))
return -EFAULT;
/*
* We do not transfer more than MAX_BUF with this interface.
* If the user needs to transfer more data than this, they
* should use scsi_generics (sg) instead.
*/
if (inlen > MAX_BUF)
return -EINVAL;
if (outlen > MAX_BUF)
return -EINVAL;
cmd_in = sic->data;
if(get_user(opcode, cmd_in))
return -EFAULT;
needed = buf_needed = (inlen > outlen ? inlen : outlen);
if (buf_needed) {
buf_needed = (buf_needed + 511) & ~511;
if (buf_needed > MAX_BUF)
buf_needed = MAX_BUF;
buf = kzalloc(buf_needed, gfp_mask);
if (!buf)
return -ENOMEM;
if (inlen == 0) {
data_direction = DMA_FROM_DEVICE;
} else if (outlen == 0 ) {
data_direction = DMA_TO_DEVICE;
} else {
/*
* Can this ever happen?
*/
data_direction = DMA_BIDIRECTIONAL;
}
} else {
buf = NULL;
data_direction = DMA_NONE;
}
/*
* Obtain the command from the user's address space.
*/
cmdlen = COMMAND_SIZE(opcode);
result = -EFAULT;
if (!access_ok(VERIFY_READ, cmd_in, cmdlen + inlen))
goto error;
if(__copy_from_user(cmd, cmd_in, cmdlen))
goto error;
/*
* Obtain the data to be sent to the device (if any).
*/
if(inlen && copy_from_user(buf, cmd_in + cmdlen, inlen))
goto error;
switch (opcode) {
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
timeout = FORMAT_UNIT_TIMEOUT;
retries = 1;
break;
case START_STOP:
timeout = START_STOP_TIMEOUT;
retries = NORMAL_RETRIES;
break;
case MOVE_MEDIUM:
timeout = MOVE_MEDIUM_TIMEOUT;
retries = NORMAL_RETRIES;
break;
case READ_ELEMENT_STATUS:
timeout = READ_ELEMENT_STATUS_TIMEOUT;
retries = NORMAL_RETRIES;
break;
case READ_DEFECT_DATA:
timeout = READ_DEFECT_DATA_TIMEOUT;
retries = 1;
break;
default:
timeout = IOCTL_NORMAL_TIMEOUT;
retries = NORMAL_RETRIES;
break;
}
result = scsi_execute(sdev, cmd, data_direction, buf, needed,
sense, timeout, retries, 0);
/*
* If there was an error condition, pass the info back to the user.
*/
if (result) {
int sb_len = sizeof(*sense);
sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len;
if (copy_to_user(cmd_in, sense, sb_len))
result = -EFAULT;
} else {
if (outlen && copy_to_user(cmd_in, buf, outlen))
result = -EFAULT;
}
error:
kfree(buf);
return result;
}
EXPORT_SYMBOL(scsi_ioctl_send_command);
/*
* The scsi_ioctl_get_pci() function places into arg the value
* pci_dev::slot_name (8 characters) for the PCI device (if any).
@ -409,7 +235,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
case SCSI_IOCTL_SEND_COMMAND:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
return scsi_ioctl_send_command(sdev, arg);
return sg_scsi_ioctl(NULL, sdev->request_queue, NULL, arg);
case SCSI_IOCTL_DOORLOCK:
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
case SCSI_IOCTL_DOORUNLOCK:

View File

@ -1479,6 +1479,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
blkdev_dequeue_request(req);
@ -1491,6 +1493,19 @@ static void scsi_kill_request(struct request *req, request_queue_t *q)
scsi_init_cmd_errh(cmd);
cmd->result = DID_NO_CONNECT << 16;
atomic_inc(&cmd->device->iorequest_cnt);
/*
* The SCSI request completion path will do scsi_device_unbusy(),
* so bump the busy counts first. To bump the counters, we need
* to dance with the locks as the normal issue path does.
*/
sdev->device_busy++;
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
shost->host_busy++;
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
__scsi_done(cmd);
}

View File

@ -0,0 +1,38 @@
#ifndef _SCSI_SAS_INTERNAL_H
#define _SCSI_SAS_INTERNAL_H
#define SAS_HOST_ATTRS 0
#define SAS_PORT_ATTRS 17
#define SAS_RPORT_ATTRS 7
#define SAS_END_DEV_ATTRS 3
#define SAS_EXPANDER_ATTRS 7
struct sas_internal {
struct scsi_transport_template t;
struct sas_function_template *f;
struct sas_domain_function_template *dft;
struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS];
struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS];
struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS];
struct transport_container phy_attr_cont;
struct transport_container rphy_attr_cont;
struct transport_container end_dev_attr_cont;
struct transport_container expander_attr_cont;
/*
* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c
*/
struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1];
struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1];
struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1];
};
#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t)
#endif

View File

@ -673,6 +673,7 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
case TYPE_MEDIUM_CHANGER:
case TYPE_ENCLOSURE:
case TYPE_COMM:
case TYPE_RAID:
case TYPE_RBC:
sdev->writeable = 1;
break;
@ -737,6 +738,13 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
if (*bflags & BLIST_SELECT_NO_ATN)
sdev->select_no_atn = 1;
/*
* Maximum 512 sector transfer length for the
* broken RA4x00 Compaq Disk Array
*/
if (*bflags & BLIST_MAX_512)
blk_queue_max_sectors(sdev->request_queue, 512);
/*
* Some devices may not want to have a start command automatically
* issued when a device is added.
@ -1123,10 +1131,13 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
* Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
* support more than 8 LUNs.
*/
if ((bflags & BLIST_NOREPORTLUN) ||
starget->scsi_level < SCSI_2 ||
(starget->scsi_level < SCSI_3 &&
(!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) )
if (bflags & BLIST_NOREPORTLUN)
return 1;
if (starget->scsi_level < SCSI_2 &&
starget->scsi_level != SCSI_UNKNOWN)
return 1;
if (starget->scsi_level < SCSI_3 &&
(!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
return 1;
if (bflags & BLIST_NOLUN)
return 0;

View File

@ -34,6 +34,8 @@
#include <scsi/scsi_cmnd.h>
#include "scsi_priv.h"
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
/*
* Redefine so that we can have same named attributes in the
* sdev/starget/host objects.
@ -213,10 +215,8 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
#define FC_MGMTSRVR_PORTID 0x00000a
static void fc_shost_remove_rports(void *data);
static void fc_timeout_deleted_rport(void *data);
static void fc_scsi_scan_rport(void *data);
static void fc_rport_terminate(struct fc_rport *rport);
/*
* Attribute counts per object type...
@ -288,42 +288,58 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
/*
* Set default values easily detected by the midlayer as
* failure cases. The scsi lldd is responsible for initializing
* all transport attributes to valid values per host.
*/
fc_host_node_name(shost) = -1;
fc_host_port_name(shost) = -1;
fc_host_permanent_port_name(shost) = -1;
fc_host_supported_classes(shost) = FC_COS_UNSPECIFIED;
memset(fc_host_supported_fc4s(shost), 0,
sizeof(fc_host_supported_fc4s(shost)));
memset(fc_host_symbolic_name(shost), 0,
sizeof(fc_host_symbolic_name(shost)));
fc_host_supported_speeds(shost) = FC_PORTSPEED_UNKNOWN;
fc_host_maxframe_size(shost) = -1;
memset(fc_host_serial_number(shost), 0,
sizeof(fc_host_serial_number(shost)));
fc_host->node_name = -1;
fc_host->port_name = -1;
fc_host->permanent_port_name = -1;
fc_host->supported_classes = FC_COS_UNSPECIFIED;
memset(fc_host->supported_fc4s, 0,
sizeof(fc_host->supported_fc4s));
memset(fc_host->symbolic_name, 0,
sizeof(fc_host->symbolic_name));
fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
fc_host->maxframe_size = -1;
memset(fc_host->serial_number, 0,
sizeof(fc_host->serial_number));
fc_host_port_id(shost) = -1;
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
memset(fc_host_active_fc4s(shost), 0,
sizeof(fc_host_active_fc4s(shost)));
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
fc_host_fabric_name(shost) = -1;
fc_host->port_id = -1;
fc_host->port_type = FC_PORTTYPE_UNKNOWN;
fc_host->port_state = FC_PORTSTATE_UNKNOWN;
memset(fc_host->active_fc4s, 0,
sizeof(fc_host->active_fc4s));
fc_host->speed = FC_PORTSPEED_UNKNOWN;
fc_host->fabric_name = -1;
fc_host_tgtid_bind_type(shost) = FC_TGTID_BIND_BY_WWPN;
fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
INIT_LIST_HEAD(&fc_host_rports(shost));
INIT_LIST_HEAD(&fc_host_rport_bindings(shost));
fc_host_next_rport_number(shost) = 0;
fc_host_next_target_id(shost) = 0;
INIT_LIST_HEAD(&fc_host->rports);
INIT_LIST_HEAD(&fc_host->rport_bindings);
fc_host->next_rport_number = 0;
fc_host->next_target_id = 0;
snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d",
shost->host_no);
fc_host->work_q = create_singlethread_workqueue(
fc_host->work_q_name);
if (!fc_host->work_q)
return -ENOMEM;
snprintf(fc_host->devloss_work_q_name, KOBJ_NAME_LEN, "fc_dl_%d",
shost->host_no);
fc_host->devloss_work_q = create_singlethread_workqueue(
fc_host->devloss_work_q_name);
if (!fc_host->devloss_work_q) {
destroy_workqueue(fc_host->work_q);
fc_host->work_q = NULL;
return -ENOMEM;
}
fc_host_flags(shost) = 0;
INIT_WORK(&fc_host_rport_del_work(shost), fc_shost_remove_rports, shost);
return 0;
}
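The switch above from the fc_host_*() accessor macros to a cached fc_host
pointer works because the transport keeps its attributes in the host's
shost_data area; shost_to_fc_host() amounts to (sketch matching the
2.6-era scsi_transport_fc.h):

#define shost_to_fc_host(x) \
        ((struct fc_host_attrs *)(x)->shost_data)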
@ -879,9 +895,9 @@ store_fc_private_host_tgtid_bind_type(struct class_device *cdev,
while (!list_empty(&fc_host_rport_bindings(shost))) {
get_list_head_entry(rport,
&fc_host_rport_bindings(shost), peers);
spin_unlock_irqrestore(shost->host_lock, flags);
fc_rport_terminate(rport);
spin_lock_irqsave(shost->host_lock, flags);
list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
fc_queue_work(shost, &rport->rport_delete_work);
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
@ -1262,6 +1278,90 @@ void fc_release_transport(struct scsi_transport_template *t)
}
EXPORT_SYMBOL(fc_release_transport);
/**
* fc_queue_work - Queue work to the fc_host workqueue.
* @shost: Pointer to Scsi_Host bound to fc_host.
* @work: Work to queue for execution.
*
* Return value:
* 0 on success / != 0 for error
**/
static int
fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
if (unlikely(!fc_host_work_q(shost))) {
printk(KERN_ERR
"ERROR: FC host '%s' attempted to queue work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
return -EINVAL;
}
return queue_work(fc_host_work_q(shost), work);
}
/**
* fc_flush_work - Flush a fc_host's workqueue.
* @shost: Pointer to Scsi_Host bound to fc_host.
**/
static void
fc_flush_work(struct Scsi_Host *shost)
{
if (!fc_host_work_q(shost)) {
printk(KERN_ERR
"ERROR: FC host '%s' attempted to flush work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
return;
}
flush_workqueue(fc_host_work_q(shost));
}
/**
* fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue.
* @shost: Pointer to Scsi_Host bound to fc_host.
* @work: Work to queue for execution.
* @delay: jiffies to delay the work queuing
*
* Return value:
* 0 on success / != 0 for error
**/
static int
fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
unsigned long delay)
{
if (unlikely(!fc_host_devloss_work_q(shost))) {
printk(KERN_ERR
"ERROR: FC host '%s' attempted to queue work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
return -EINVAL;
}
return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
}
/**
* fc_flush_devloss - Flush a fc_host's devloss workqueue.
* @shost: Pointer to Scsi_Host bound to fc_host.
**/
static void
fc_flush_devloss(struct Scsi_Host *shost)
{
if (!fc_host_devloss_work_q(shost)) {
printk(KERN_ERR
"ERROR: FC host '%s' attempted to flush work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
return;
}
flush_workqueue(fc_host_devloss_work_q(shost));
}
/**
* fc_remove_host - called to terminate any fc_transport-related elements
@ -1283,36 +1383,103 @@ void
fc_remove_host(struct Scsi_Host *shost)
{
struct fc_rport *rport, *next_rport;
struct workqueue_struct *work_q;
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
/* Remove any remote ports */
list_for_each_entry_safe(rport, next_rport,
&fc_host_rports(shost), peers)
fc_rport_terminate(rport);
&fc_host->rports, peers) {
list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
fc_queue_work(shost, &rport->rport_delete_work);
}
list_for_each_entry_safe(rport, next_rport,
&fc_host_rport_bindings(shost), peers)
fc_rport_terminate(rport);
&fc_host->rport_bindings, peers) {
list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
fc_queue_work(shost, &rport->rport_delete_work);
}
/* flush all scan work items */
scsi_flush_work(shost);
/* flush all stgt delete, and rport delete work items, then kill it */
if (fc_host->work_q) {
work_q = fc_host->work_q;
fc_host->work_q = NULL;
destroy_workqueue(work_q);
}
/* flush all devloss work items, then kill it */
if (fc_host->devloss_work_q) {
work_q = fc_host->devloss_work_q;
fc_host->devloss_work_q = NULL;
destroy_workqueue(work_q);
}
}
EXPORT_SYMBOL(fc_remove_host);
/*
* fc_rport_tgt_remove - Removes the scsi target on the remote port
* @rport: The remote port to be operated on
*/
/**
* fc_starget_delete - called to delete the scsi descendants of an rport
* (target and all sdevs)
*
* @data: remote port to be operated on.
**/
static void
fc_rport_tgt_remove(struct fc_rport *rport)
fc_starget_delete(void *data)
{
struct fc_rport *rport = (struct fc_rport *)data;
struct Scsi_Host *shost = rport_to_shost(rport);
unsigned long flags;
scsi_target_unblock(&rport->dev);
/* Stop anything on the workq */
if (!cancel_delayed_work(&rport->dev_loss_work))
flush_scheduled_work();
scsi_flush_work(shost);
spin_lock_irqsave(shost->host_lock, flags);
if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
spin_unlock_irqrestore(shost->host_lock, flags);
if (!cancel_delayed_work(&rport->dev_loss_work))
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
}
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_target(&rport->dev);
}
/**
* fc_rport_final_delete - finish rport termination and delete it.
*
* @data: remote port to be deleted.
**/
static void
fc_rport_final_delete(void *data)
{
struct fc_rport *rport = (struct fc_rport *)data;
struct device *dev = &rport->dev;
struct Scsi_Host *shost = rport_to_shost(rport);
/* Delete SCSI target and sdevs */
if (rport->scsi_target_id != -1)
fc_starget_delete(data);
/*
* if a scan is pending, flush the SCSI Host work_q so that
* we can reclaim the rport scan work element.
*/
if (rport->flags & FC_RPORT_SCAN_PENDING)
scsi_flush_work(shost);
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
put_device(&shost->shost_gendev);
}
/**
* fc_rport_create - allocates and creates a remote FC port.
* @shost: scsi host the remote port is connected to.
@ -1330,8 +1497,7 @@ struct fc_rport *
fc_rport_create(struct Scsi_Host *shost, int channel,
struct fc_rport_identifiers *ids)
{
struct fc_host_attrs *fc_host =
(struct fc_host_attrs *)shost->shost_data;
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_internal *fci = to_fc_internal(shost->transportt);
struct fc_rport *rport;
struct device *dev;
@ -1360,6 +1526,8 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
spin_lock_irqsave(shost->host_lock, flags);
@ -1368,7 +1536,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
rport->scsi_target_id = fc_host->next_target_id++;
else
rport->scsi_target_id = -1;
list_add_tail(&rport->peers, &fc_host_rports(shost));
list_add_tail(&rport->peers, &fc_host->rports);
get_device(&shost->shost_gendev);
spin_unlock_irqrestore(shost->host_lock, flags);
@ -1389,9 +1557,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
transport_add_device(dev);
transport_configure_device(dev);
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
}
return rport;
@ -1451,10 +1621,14 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
struct fc_rport_identifiers *ids)
{
struct fc_internal *fci = to_fc_internal(shost->transportt);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_rport *rport;
unsigned long flags;
int match = 0;
/* ensure any stgt delete functions are done */
fc_flush_work(shost);
/*
* Search the list of "active" rports, for an rport that has been
* deleted, but we've held off the real delete while the target
@ -1462,12 +1636,12 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
*/
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(rport, &fc_host_rports(shost), peers) {
list_for_each_entry(rport, &fc_host->rports, peers) {
if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
(rport->channel == channel)) {
switch (fc_host_tgtid_bind_type(shost)) {
switch (fc_host->tgtid_bind_type) {
case FC_TGTID_BIND_BY_WWPN:
case FC_TGTID_BIND_NONE:
if (rport->port_name == ids->port_name)
@ -1521,27 +1695,34 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
* transaction.
*/
if (!cancel_delayed_work(work))
flush_scheduled_work();
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
spin_unlock_irqrestore(shost->host_lock, flags);
return rport;
}
}
}
/* Search the bindings array */
if (fc_host_tgtid_bind_type(shost) != FC_TGTID_BIND_NONE) {
if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
/* search for a matching consistent binding */
list_for_each_entry(rport, &fc_host_rport_bindings(shost),
list_for_each_entry(rport, &fc_host->rport_bindings,
peers) {
if (rport->channel != channel)
continue;
switch (fc_host_tgtid_bind_type(shost)) {
switch (fc_host->tgtid_bind_type) {
case FC_TGTID_BIND_BY_WWPN:
if (rport->port_name == ids->port_name)
match = 1;
@ -1559,8 +1740,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
}
if (match) {
list_move_tail(&rport->peers,
&fc_host_rports(shost));
list_move_tail(&rport->peers, &fc_host->rports);
break;
}
}
@ -1574,15 +1754,17 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
rport->roles = ids->roles;
rport->port_state = FC_PORTSTATE_ONLINE;
spin_unlock_irqrestore(shost->host_lock, flags);
if (fci->f->dd_fcrport_size)
memset(rport->dd_data, 0,
fci->f->dd_fcrport_size);
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
}
spin_unlock_irqrestore(shost->host_lock, flags);
return rport;
}
@ -1597,30 +1779,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
}
EXPORT_SYMBOL(fc_remote_port_add);
/*
* fc_rport_terminate - this routine tears down and deallocates a remote port.
* @rport: The remote port to be terminated
*
* Notes:
* This routine assumes no locks are held on entry.
*/
static void
fc_rport_terminate(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
struct device *dev = &rport->dev;
unsigned long flags;
fc_rport_tgt_remove(rport);
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
spin_lock_irqsave(shost->host_lock, flags);
list_del(&rport->peers);
spin_unlock_irqrestore(shost->host_lock, flags);
put_device(&shost->shost_gendev);
}
/**
* fc_remote_port_delete - notifies the fc transport that a remote
@ -1675,20 +1833,39 @@ fc_rport_terminate(struct fc_rport *rport)
void
fc_remote_port_delete(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
int timeout = rport->dev_loss_tmo;
unsigned long flags;
/*
* No need to flush the fc_host work_q's, as all adds are synchronous.
*
* We do need to reclaim the rport scan work element, so eventually
* (in fc_rport_final_delete()) we'll flush the scsi host work_q if
* there's still a scan pending.
*/
spin_lock_irqsave(shost->host_lock, flags);
/* If no scsi target id mapping, delete it */
if (rport->scsi_target_id == -1) {
fc_rport_terminate(rport);
list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
fc_queue_work(shost, &rport->rport_delete_work);
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
rport->port_state = FC_PORTSTATE_BLOCKED;
rport->flags |= FC_RPORT_DEVLOSS_PENDING;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_target_block(&rport->dev);
/* cap the length the devices can be blocked until they are deleted */
schedule_delayed_work(&rport->dev_loss_work, timeout * HZ);
rport->port_state = FC_PORTSTATE_BLOCKED;
fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
}
EXPORT_SYMBOL(fc_remote_port_delete);
@ -1716,8 +1893,7 @@ void
fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
{
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_host_attrs *fc_host =
(struct fc_host_attrs *)shost->shost_data;
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
unsigned long flags;
int create = 0;
@ -1729,10 +1905,11 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
} else if (!(rport->roles & FC_RPORT_ROLE_FCP_TARGET))
create = 1;
}
spin_unlock_irqrestore(shost->host_lock, flags);
rport->roles = roles;
spin_unlock_irqrestore(shost->host_lock, flags);
if (create) {
/*
* There may have been a delete timer running on the
@ -1747,10 +1924,20 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
* transaction.
*/
if (!cancel_delayed_work(&rport->dev_loss_work))
flush_scheduled_work();
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
fc_flush_work(shost);
/* initiate a scan of the target */
spin_lock_irqsave(shost->host_lock, flags);
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
spin_unlock_irqrestore(shost->host_lock, flags);
}
}
EXPORT_SYMBOL(fc_remote_port_rolechg);
@ -1767,22 +1954,24 @@ fc_timeout_deleted_rport(void *data)
{
struct fc_rport *rport = (struct fc_rport *)data;
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
/*
* If the port is ONLINE, then it came back, but was no longer an
* FCP target. Thus we need to tear down the scsi_target on it.
* If the port is ONLINE, then it came back. Validate it's still an
* FCP target. If not, tear down the scsi_target on it.
*/
if (rport->port_state == FC_PORTSTATE_ONLINE) {
spin_unlock_irqrestore(shost->host_lock, flags);
if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
!(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
dev_printk(KERN_ERR, &rport->dev,
"blocked FC remote port time out: removing target\n");
fc_rport_tgt_remove(rport);
"blocked FC remote port time out: no longer"
" a FCP target, removing starget\n");
fc_queue_work(shost, &rport->stgt_delete_work);
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
@ -1793,11 +1982,13 @@ fc_timeout_deleted_rport(void *data)
return;
}
if (fc_host_tgtid_bind_type(shost) == FC_TGTID_BIND_NONE) {
spin_unlock_irqrestore(shost->host_lock, flags);
if (fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) {
list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
dev_printk(KERN_ERR, &rport->dev,
"blocked FC remote port time out: removing target\n");
fc_rport_terminate(rport);
fc_queue_work(shost, &rport->rport_delete_work);
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
@ -1805,7 +1996,7 @@ fc_timeout_deleted_rport(void *data)
"blocked FC remote port time out: removing target and "
"saving binding\n");
list_move_tail(&rport->peers, &fc_host_rport_bindings(shost));
list_move_tail(&rport->peers, &fc_host->rport_bindings);
/*
* Note: We do not remove or clear the hostdata area. This allows
@ -1819,10 +2010,10 @@ fc_timeout_deleted_rport(void *data)
rport->maxframe_size = -1;
rport->supported_classes = FC_COS_UNSPECIFIED;
rport->roles = FC_RPORT_ROLE_UNKNOWN;
rport->port_state = FC_PORTSTATE_DELETED;
rport->port_state = FC_PORTSTATE_NOTPRESENT;
/* remove the identifiers that aren't used in the consistent binding */
switch (fc_host_tgtid_bind_type(shost)) {
switch (fc_host->tgtid_bind_type) {
case FC_TGTID_BIND_BY_WWPN:
rport->node_name = -1;
rport->port_id = -1;
@ -1843,17 +2034,8 @@ fc_timeout_deleted_rport(void *data)
* As this only occurs if the remote port (scsi target)
* went away and didn't come back - we'll remove
* all attached scsi devices.
*
* We'll schedule the shost work item to perform the actual removal
* to avoid recursion in the different flush calls if we perform
* the removal in each target - and there are lots of targets
* whose timeouts fire at the same time.
*/
if ( !(fc_host_flags(shost) & FC_SHOST_RPORT_DEL_SCHEDULED)) {
fc_host_flags(shost) |= FC_SHOST_RPORT_DEL_SCHEDULED;
scsi_queue_work(shost, &fc_host_rport_del_work(shost));
}
fc_queue_work(shost, &rport->stgt_delete_work);
spin_unlock_irqrestore(shost->host_lock, flags);
}
@ -1870,44 +2052,18 @@ static void
fc_scsi_scan_rport(void *data)
{
struct fc_rport *rport = (struct fc_rport *)data;
scsi_target_unblock(&rport->dev);
scsi_scan_target(&rport->dev, rport->channel, rport->scsi_target_id,
SCAN_WILD_CARD, 1);
}
/**
* fc_shost_remove_rports - called to remove all rports that are marked
* as in a deleted (not connected) state.
*
* @data: shost whose rports are to be looked at
**/
static void
fc_shost_remove_rports(void *data)
{
struct Scsi_Host *shost = (struct Scsi_Host *)data;
struct fc_rport *rport, *next_rport;
struct Scsi_Host *shost = rport_to_shost(rport);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
while (fc_host_flags(shost) & FC_SHOST_RPORT_DEL_SCHEDULED) {
fc_host_flags(shost) &= ~FC_SHOST_RPORT_DEL_SCHEDULED;
restart_search:
list_for_each_entry_safe(rport, next_rport,
&fc_host_rport_bindings(shost), peers) {
if (rport->port_state == FC_PORTSTATE_DELETED) {
rport->port_state = FC_PORTSTATE_NOTPRESENT;
spin_unlock_irqrestore(shost->host_lock, flags);
fc_rport_tgt_remove(rport);
spin_lock_irqsave(shost->host_lock, flags);
goto restart_search;
}
}
if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
scsi_target_unblock(&rport->dev);
scsi_scan_target(&rport->dev, rport->channel,
rport->scsi_target_id, SCAN_WILD_CARD, 1);
}
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_SCAN_PENDING;
spin_unlock_irqrestore(shost->host_lock, flags);
}

View File

@ -35,40 +35,7 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#define SAS_HOST_ATTRS 0
#define SAS_PORT_ATTRS 17
#define SAS_RPORT_ATTRS 7
#define SAS_END_DEV_ATTRS 3
#define SAS_EXPANDER_ATTRS 7
struct sas_internal {
struct scsi_transport_template t;
struct sas_function_template *f;
struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS];
struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS];
struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS];
struct transport_container phy_attr_cont;
struct transport_container rphy_attr_cont;
struct transport_container end_dev_attr_cont;
struct transport_container expander_attr_cont;
/*
* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c
*/
struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1];
struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1];
struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1];
};
#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t)
#include "scsi_sas_internal.h"
struct sas_host_attrs {
struct list_head rphy_list;
struct mutex lock;
@ -406,8 +373,6 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
if (!phy)
return NULL;
get_device(parent);
phy->number = number;
device_initialize(&phy->dev);
@ -459,10 +424,7 @@ EXPORT_SYMBOL(sas_phy_add);
void sas_phy_free(struct sas_phy *phy)
{
transport_destroy_device(&phy->dev);
put_device(phy->dev.parent);
put_device(phy->dev.parent);
put_device(phy->dev.parent);
kfree(phy);
put_device(&phy->dev);
}
EXPORT_SYMBOL(sas_phy_free);
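The dropped kfree() and the redundant put_device() calls give way to
release-callback ownership: the embedded struct device frees the
containing object when its last reference goes away. A minimal sketch of
the pattern, with illustrative names:

/* The release callback, not the caller, performs the kfree(). */
static void example_phy_release(struct device *dev)
{
        struct sas_phy *phy = container_of(dev, struct sas_phy, dev);

        kfree(phy);                     /* runs when the refcount hits zero */
}

/* setup:    phy->dev.release = example_phy_release;
 *           device_initialize(&phy->dev);
 * teardown: put_device(&phy->dev);    -- no direct kfree() anywhere */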
@ -484,7 +446,7 @@ sas_phy_delete(struct sas_phy *phy)
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
put_device(dev->parent);
put_device(dev);
}
EXPORT_SYMBOL(sas_phy_delete);
@ -800,7 +762,6 @@ struct sas_rphy *sas_end_device_alloc(struct sas_phy *parent)
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
put_device(&parent->dev);
return NULL;
}
@ -836,7 +797,6 @@ struct sas_rphy *sas_expander_alloc(struct sas_phy *parent,
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
put_device(&parent->dev);
return NULL;
}
@ -885,6 +845,8 @@ int sas_rphy_add(struct sas_rphy *rphy)
(identify->target_port_protocols &
(SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
rphy->scsi_target_id = sas_host->next_target_id++;
else if (identify->device_type == SAS_END_DEVICE)
rphy->scsi_target_id = -1;
mutex_unlock(&sas_host->lock);
if (identify->device_type == SAS_END_DEVICE &&
@ -910,6 +872,7 @@ EXPORT_SYMBOL(sas_rphy_add);
*/
void sas_rphy_free(struct sas_rphy *rphy)
{
struct device *dev = &rphy->dev;
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
@ -917,21 +880,9 @@ void sas_rphy_free(struct sas_rphy *rphy)
list_del(&rphy->list);
mutex_unlock(&sas_host->lock);
transport_destroy_device(&rphy->dev);
put_device(rphy->dev.parent);
put_device(rphy->dev.parent);
put_device(rphy->dev.parent);
if (rphy->identify.device_type == SAS_END_DEVICE) {
struct sas_end_device *edev = rphy_to_end_device(rphy);
transport_destroy_device(dev);
kfree(edev);
} else {
/* must be expander */
struct sas_expander_device *edev =
rphy_to_expander_device(rphy);
kfree(edev);
}
put_device(dev);
}
EXPORT_SYMBOL(sas_rphy_free);
@ -971,7 +922,7 @@ sas_rphy_delete(struct sas_rphy *rphy)
parent->rphy = NULL;
put_device(&parent->dev);
put_device(dev);
}
EXPORT_SYMBOL(sas_rphy_delete);

View File

@ -748,6 +748,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
/*
* most likely out of mem, but could also be a bad map
*/
sg_finish_rem_req(srp);
return -ENOMEM;
} else
return 0;
@ -1044,7 +1045,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (!sg_allow_access(opcode, sdp->device->type))
return -EPERM;
}
return scsi_ioctl_send_command(sdp->device, p);
return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@ -1798,8 +1799,10 @@ sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
res = st_map_user_pages(schp->buffer, mx_sc_elems,
(unsigned long)hp->dxferp, dxfer_len,
(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
if (res <= 0)
if (res <= 0) {
sg_remove_scat(schp);
return 1;
}
schp->k_use_sg = res;
schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;

View File

@ -40,7 +40,7 @@
#ifndef SYM_DEFS_H
#define SYM_DEFS_H
#define SYM_VERSION "2.2.2"
#define SYM_VERSION "2.2.3"
#define SYM_DRIVER_NAME "sym-" SYM_VERSION
/*

View File

@ -134,66 +134,17 @@ static void sym2_setup_params(void)
}
}
/*
* We used to try to deal with 64-bit BARs here, but don't any more.
* There are many parts of this driver which would need to be modified
* to handle a 64-bit base address, including scripts. I'm uncomfortable
* with making those changes when I have no way of testing it, so I'm
* just going to disable it.
*
* Note that some machines (eg HP rx8620 and Superdome) have bus addresses
* below 4GB and physical addresses above 4GB. These will continue to work.
*/
static int __devinit
pci_get_base_address(struct pci_dev *pdev, int index, unsigned long *basep)
{
u32 tmp;
unsigned long base;
#define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2))
pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp);
base = tmp;
if ((tmp & 0x7) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp);
if (tmp > 0) {
dev_err(&pdev->dev,
"BAR %d is 64-bit, disabling\n", index - 1);
base = 0;
}
}
if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
base &= PCI_BASE_ADDRESS_IO_MASK;
} else {
base &= PCI_BASE_ADDRESS_MEM_MASK;
}
*basep = base;
return index;
#undef PCI_BAR_OFFSET
}
static struct scsi_transport_template *sym2_transport_template = NULL;
/*
* Used by the eh thread to wait for command completion.
* It is allocated on the eh thread stack.
*/
struct sym_eh_wait {
struct completion done;
struct timer_list timer;
void (*old_done)(struct scsi_cmnd *);
int to_do;
int timed_out;
};
/*
* Driver private area in the SCSI command structure.
*/
struct sym_ucmd { /* Override the SCSI pointer structure */
dma_addr_t data_mapping;
u_char data_mapped;
struct sym_eh_wait *eh_wait;
dma_addr_t data_mapping;
unsigned char data_mapped;
unsigned char to_do; /* For error handling */
void (*old_done)(struct scsi_cmnd *); /* For error handling */
struct completion *eh_done; /* For error handling */
};
#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
@ -514,8 +465,6 @@ static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struc
*/
int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
struct sym_tcb *tp = &np->target[cp->target];
struct sym_lcb *lp = sym_lp(tp, cp->lun);
u32 lastp, goalp;
int dir;
@ -596,7 +545,7 @@ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct s
/*
* activate this job.
*/
sym_start_next_ccbs(np, lp, 2);
sym_put_start_queue(np, cp);
return 0;
out_abort:
@ -751,44 +700,22 @@ static void sym53c8xx_timer(unsigned long npref)
* What we will do regarding the involved SCSI command.
*/
#define SYM_EH_DO_IGNORE 0
#define SYM_EH_DO_COMPLETE 1
#define SYM_EH_DO_WAIT 2
/*
* Our general completion handler.
* scsi_done() alias when error recovery is in progress.
*/
static void __sym_eh_done(struct scsi_cmnd *cmd, int timed_out)
static void sym_eh_done(struct scsi_cmnd *cmd)
{
struct sym_eh_wait *ep = SYM_UCMD_PTR(cmd)->eh_wait;
if (!ep)
return;
struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));
/* Try to avoid a race here (not 100% safe) */
if (!timed_out) {
ep->timed_out = 0;
if (ep->to_do == SYM_EH_DO_WAIT && !del_timer(&ep->timer))
return;
}
cmd->scsi_done = ucmd->old_done;
/* Revert everything */
SYM_UCMD_PTR(cmd)->eh_wait = NULL;
cmd->scsi_done = ep->old_done;
/* Wake up the eh thread if it wants to sleep */
if (ep->to_do == SYM_EH_DO_WAIT)
complete(&ep->done);
if (ucmd->to_do == SYM_EH_DO_WAIT)
complete(ucmd->eh_done);
}
/*
* scsi_done() alias when error recovery is in progress.
*/
static void sym_eh_done(struct scsi_cmnd *cmd) { __sym_eh_done(cmd, 0); }
/*
* Some timeout handler to avoid waiting too long.
*/
static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1); }
/*
* Generic method for our eh processing.
* The 'op' argument tells what we have to do.
@ -796,35 +723,31 @@ static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1);
static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
{
struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
struct Scsi_Host *host = cmd->device->host;
SYM_QUEHEAD *qp;
int to_do = SYM_EH_DO_IGNORE;
int sts = -1;
struct sym_eh_wait eh, *ep = &eh;
struct completion eh_done;
dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname);
spin_lock_irq(host->host_lock);
/* This one is queued in some place -> to wait for completion */
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->cmd == cmd) {
to_do = SYM_EH_DO_WAIT;
goto prepare;
break;
}
}
prepare:
/* Prepare stuff to either ignore, complete or wait for completion */
switch(to_do) {
default:
case SYM_EH_DO_IGNORE:
break;
case SYM_EH_DO_WAIT:
init_completion(&ep->done);
/* fall through */
case SYM_EH_DO_COMPLETE:
ep->old_done = cmd->scsi_done;
if (to_do == SYM_EH_DO_WAIT) {
init_completion(&eh_done);
ucmd->old_done = cmd->scsi_done;
ucmd->eh_done = &eh_done;
wmb();
cmd->scsi_done = sym_eh_done;
SYM_UCMD_PTR(cmd)->eh_wait = ep;
}
/* Try to proceed the operation we have been asked for */
@@ -851,29 +774,19 @@ prepare:
/* On error, restore everything and cross fingers :) */
if (sts) {
SYM_UCMD_PTR(cmd)->eh_wait = NULL;
cmd->scsi_done = ep->old_done;
cmd->scsi_done = ucmd->old_done;
to_do = SYM_EH_DO_IGNORE;
}
ep->to_do = to_do;
/* Complete the command with locks held as required by the driver */
if (to_do == SYM_EH_DO_COMPLETE)
sym_xpt_done2(np, cmd, DID_ABORT);
ucmd->to_do = to_do;
spin_unlock_irq(host->host_lock);
/* Wait for completion with locks released, as required by kernel */
if (to_do == SYM_EH_DO_WAIT) {
init_timer(&ep->timer);
ep->timer.expires = jiffies + (5*HZ);
ep->timer.function = sym_eh_timeout;
ep->timer.data = (u_long)cmd;
ep->timed_out = 1; /* Be pessimistic for once :) */
add_timer(&ep->timer);
spin_unlock_irq(np->s.host->host_lock);
wait_for_completion(&ep->done);
spin_lock_irq(np->s.host->host_lock);
if (ep->timed_out)
if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
ucmd->to_do = SYM_EH_DO_IGNORE;
wmb();
sts = -2;
}
}
dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
sts == 0 ? "complete" : sts == -2 ? "timed-out" : "failed");
@@ -886,46 +799,22 @@ prepare:
*/
static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
{
int rc;
spin_lock_irq(cmd->device->host->host_lock);
rc = sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
spin_unlock_irq(cmd->device->host->host_lock);
return rc;
return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
}
static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
int rc;
spin_lock_irq(cmd->device->host->host_lock);
rc = sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
spin_unlock_irq(cmd->device->host->host_lock);
return rc;
return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
}
static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
int rc;
spin_lock_irq(cmd->device->host->host_lock);
rc = sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
spin_unlock_irq(cmd->device->host->host_lock);
return rc;
return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
}
static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
int rc;
spin_lock_irq(cmd->device->host->host_lock);
rc = sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
spin_unlock_irq(cmd->device->host->host_lock);
return rc;
return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
}
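
The rewrite above retires the private sym_eh_wait structure, its timer, and the per-handler lock juggling: the eh thread now keeps a completion on its own stack, and wait_for_completion_timeout() folds the wake-up and the 5-second timeout into a single primitive. A minimal sketch of that pattern, using an illustrative driver context (demo_ctx, demo_op_*) rather than this driver's types:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <asm/system.h>		/* wmb(), 2.6-era location */

struct demo_ctx {
	struct completion *done;	/* non-NULL only while a waiter exists */
};

/* Completion side; may run in interrupt context. */
static void demo_op_done(struct demo_ctx *ctx)
{
	if (ctx->done)
		complete(ctx->done);
}

/* Waiting side; process context, may sleep up to five seconds. */
static int demo_op_wait(struct demo_ctx *ctx)
{
	struct completion done;

	init_completion(&done);
	ctx->done = &done;
	wmb();				/* publish before the event can fire */

	/* ... start the operation here ... */

	if (!wait_for_completion_timeout(&done, 5 * HZ)) {
		ctx->done = NULL;	/* timed out: disarm the completer */
		return -ETIMEDOUT;
	}
	return 0;
}

As in the driver, the NULL check and the barrier only narrow the race between the completer and a timed-out waiter; the completion itself is the real synchronization point.
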
/*
@@ -944,15 +833,12 @@ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
if (reqtags > lp->s.scdev_depth)
reqtags = lp->s.scdev_depth;
lp->started_limit = reqtags ? reqtags : 2;
lp->started_max = 1;
lp->s.reqtags = reqtags;
if (reqtags != oldtags) {
dev_info(&tp->starget->dev,
"tagged command queuing %s, command queue depth %d.\n",
lp->s.reqtags ? "enabled" : "disabled",
lp->started_limit);
lp->s.reqtags ? "enabled" : "disabled", reqtags);
}
}
@@ -1866,15 +1752,25 @@ static int __devinit sym_set_workarounds(struct sym_device *device)
static void __devinit
sym_init_device(struct pci_dev *pdev, struct sym_device *device)
{
int i;
int i = 2;
struct pci_bus_region bus_addr;
device->host_id = SYM_SETUP_HOST_ID;
device->pdev = pdev;
i = pci_get_base_address(pdev, 1, &device->mmio_base);
pci_get_base_address(pdev, i, &device->ram_base);
pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]);
device->mmio_base = bus_addr.start;
#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
/*
* If the BAR is 64-bit, resource 2 will be occupied by the
* upper 32 bits
*/
if (!pdev->resource[i].flags)
i++;
pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]);
device->ram_base = bus_addr.start;
#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
if (device->mmio_base)
device->s.ioaddr = pci_iomap(pdev, 1,
pci_resource_len(pdev, 1));
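
sym_init_device() above now derives bus addresses from the probed resources instead of re-reading config space: pcibios_resource_to_bus() converts resource N to the address the chip itself decodes, and an empty slot (flags == 0) following a BAR marks the upper half of a 64-bit BAR. A short sketch of the same technique, under the same heuristic, with illustrative demo_ names:

#include <linux/pci.h>

/* Bus address of a BAR, as the device itself would see it. */
static unsigned long demo_bar_bus_addr(struct pci_dev *pdev, int bar)
{
	struct pci_bus_region region;

	pcibios_resource_to_bus(pdev, &region, &pdev->resource[bar]);
	return region.start;
}

/*
 * Index of the BAR following 'bar', skipping the empty slot that
 * holds the upper 32 bits when 'bar' is a 64-bit memory BAR.
 */
static int demo_next_bar(struct pci_dev *pdev, int bar)
{
	bar++;
	if (!pdev->resource[bar].flags)
		bar++;
	return bar;
}
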
@@ -1978,7 +1874,8 @@ static struct scsi_host_template sym2_template = {
.eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
.eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
.this_id = 7,
.use_clustering = DISABLE_CLUSTERING,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xFFFF,
#ifdef SYM_LINUX_PROC_INFO_SUPPORT
.proc_info = sym53c8xx_proc_info,
.proc_name = NAME53C8XX,

View File

@@ -68,7 +68,7 @@
*/
#define SYM_CONF_TIMER_INTERVAL ((HZ+1)/2)
#define SYM_OPT_HANDLE_DEVICE_QUEUEING
#undef SYM_OPT_HANDLE_DEVICE_QUEUEING
#define SYM_OPT_LIMIT_COMMAND_REORDERING
/*

View File

@@ -72,7 +72,10 @@ static void sym_printl_hex(u_char *p, int n)
static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
{
sym_print_addr(cp->cmd, "%s: ", label);
if (label)
sym_print_addr(cp->cmd, "%s: ", label);
else
sym_print_addr(cp->cmd, "");
spi_print_msg(msg);
printf("\n");
@@ -472,7 +475,7 @@ static int sym_getpciclock (struct sym_hcb *np)
* calculations more simple.
*/
#define _5M 5000000
static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
/*
* Get clock factor and sync divisor for a given
@@ -644,6 +647,37 @@ static void sym_save_initial_setting (struct sym_hcb *np)
np->sv_ctest5 = INB(np, nc_ctest5) & 0x24;
}
/*
* Set SCSI BUS mode.
* - LVD capable chips (895/895A/896/1010) report the current BUS mode
* through the STEST4 IO register.
* - For previous generation chips (825/825A/875), the user has to tell us
* how to check against HVD, since a 100% safe algorithm is not possible.
*/
static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram)
{
if (np->scsi_mode)
return;
np->scsi_mode = SMODE_SE;
if (np->features & (FE_ULTRA2|FE_ULTRA3))
np->scsi_mode = (np->sv_stest4 & SMODE);
else if (np->features & FE_DIFF) {
if (SYM_SETUP_SCSI_DIFF == 1) {
if (np->sv_scntl3) {
if (np->sv_stest2 & 0x20)
np->scsi_mode = SMODE_HVD;
} else if (nvram->type == SYM_SYMBIOS_NVRAM) {
if (!(INB(np, nc_gpreg) & 0x08))
np->scsi_mode = SMODE_HVD;
}
} else if (SYM_SETUP_SCSI_DIFF == 2)
np->scsi_mode = SMODE_HVD;
}
if (np->scsi_mode == SMODE_HVD)
np->rv_stest2 |= 0x20;
}
/*
* Prepare io register values used by sym_start_up()
* according to selected and supported features.
@@ -654,10 +688,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
u32 period;
int i;
/*
* Wide ?
*/
np->maxwide = (np->features & FE_WIDE)? 1 : 0;
np->maxwide = (np->features & FE_WIDE) ? 1 : 0;
/*
* Guess the frequency of the chip's clock.
@@ -838,6 +869,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
* Get parity checking, host ID and verbose mode from NVRAM
*/
np->myaddr = 255;
np->scsi_mode = 0;
sym_nvram_setup_host(shost, np, nvram);
/*
@@ -854,33 +886,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
*/
sym_init_burst(np, burst_max);
/*
* Set SCSI BUS mode.
* - LVD capable chips (895/895A/896/1010) report the
* current BUS mode through the STEST4 IO register.
* - For previous generation chips (825/825A/875),
* user has to tell us how to check against HVD,
* since a 100% safe algorithm is not possible.
*/
np->scsi_mode = SMODE_SE;
if (np->features & (FE_ULTRA2|FE_ULTRA3))
np->scsi_mode = (np->sv_stest4 & SMODE);
else if (np->features & FE_DIFF) {
if (SYM_SETUP_SCSI_DIFF == 1) {
if (np->sv_scntl3) {
if (np->sv_stest2 & 0x20)
np->scsi_mode = SMODE_HVD;
}
else if (nvram->type == SYM_SYMBIOS_NVRAM) {
if (!(INB(np, nc_gpreg) & 0x08))
np->scsi_mode = SMODE_HVD;
}
}
else if (SYM_SETUP_SCSI_DIFF == 2)
np->scsi_mode = SMODE_HVD;
}
if (np->scsi_mode == SMODE_HVD)
np->rv_stest2 |= 0x20;
sym_set_bus_mode(np, nvram);
/*
* Set LED support from SCRIPTS.
@@ -973,8 +979,8 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
*
* Has to be called with interrupts disabled.
*/
#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
static int sym_regtest (struct sym_hcb *np)
#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
static int sym_regtest(struct sym_hcb *np)
{
register volatile u32 data;
/*
@@ -992,20 +998,25 @@ static int sym_regtest (struct sym_hcb *np)
#endif
printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
(unsigned) data);
return (0x10);
return 0x10;
}
return (0);
return 0;
}
#else
static inline int sym_regtest(struct sym_hcb *np)
{
return 0;
}
#endif
static int sym_snooptest (struct sym_hcb *np)
static int sym_snooptest(struct sym_hcb *np)
{
u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
int i, err=0;
#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
err |= sym_regtest (np);
if (err) return (err);
#endif
u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
int i, err;
err = sym_regtest(np);
if (err)
return err;
restart_test:
/*
* Enable Master Parity Checking as we intend
@@ -1094,7 +1105,7 @@ restart_test:
err |= 4;
}
return (err);
return err;
}
/*
@@ -1464,7 +1475,7 @@ static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgp
/*
* Insert a job into the start queue.
*/
static void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
{
u_short qidx;
@@ -4481,7 +4492,7 @@ static void sym_int_sir (struct sym_hcb *np)
switch (np->msgin [2]) {
case M_X_MODIFY_DP:
if (DEBUG_FLAGS & DEBUG_POINTER)
sym_print_msg(cp,"modify DP",np->msgin);
sym_print_msg(cp, NULL, np->msgin);
tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
(np->msgin[5]<<8) + (np->msgin[6]);
sym_modify_dp(np, tp, cp, tmp);
@@ -4508,7 +4519,7 @@ static void sym_int_sir (struct sym_hcb *np)
*/
case M_IGN_RESIDUE:
if (DEBUG_FLAGS & DEBUG_POINTER)
sym_print_msg(cp,"ign wide residue", np->msgin);
sym_print_msg(cp, NULL, np->msgin);
if (cp->host_flags & HF_SENSE)
OUTL_DSP(np, SCRIPTA_BA(np, clrack));
else
@@ -4597,7 +4608,8 @@ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char t
* Debugging purpose.
*/
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
assert(lp->busy_itl == 0);
if (lp->busy_itl != 0)
goto out_free;
#endif
/*
* Allocate resources for tags if not yet.
@@ -4642,7 +4654,8 @@ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char t
* Debugging purpose.
*/
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
assert(lp->busy_itl == 0 && lp->busy_itlq == 0);
if (lp->busy_itl != 0 || lp->busy_itlq != 0)
goto out_free;
#endif
/*
* Count this nexus for this LUN.

View File

@@ -1049,6 +1049,8 @@ int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
#else
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
#endif
void sym_start_up(struct sym_hcb *np, int reason);
void sym_interrupt(struct sym_hcb *np);
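
The #else branch added above makes sym_put_start_queue() the public entry point whenever SYM_OPT_HANDLE_DEVICE_QUEUEING is off, matching the sym_glue.c change earlier in this diff where sym_start_next_ccbs() was swapped for it. A sketch of how a caller pairs the two, assuming the driver's own headers and an illustrative demo_ wrapper:

#include "sym_glue.h"

static void demo_start_job(struct sym_hcb *np, struct sym_lcb *lp,
		struct sym_ccb *cp)
{
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	sym_start_next_ccbs(np, lp, 2);	/* per-device queue management */
#else
	sym_put_start_queue(np, cp);	/* straight onto the start queue */
#endif
}
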

View File

@@ -17,6 +17,8 @@
#include <asm/scatterlist.h>
struct scsi_ioctl_command;
struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
@@ -611,6 +613,8 @@ extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
struct gendisk *, struct scsi_ioctl_command __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
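
Exporting sg_scsi_ioctl() through blkdev.h lets block-layer users service the legacy SCSI_IOCTL_SEND_COMMAND path without duplicating it. A hedged sketch of a caller (demo_ioctl is illustrative, not part of this series):

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <scsi/scsi_ioctl.h>

static int demo_ioctl(struct file *file, struct request_queue *q,
		struct gendisk *disk, unsigned int cmd, void __user *arg)
{
	switch (cmd) {
	case SCSI_IOCTL_SEND_COMMAND:
		if (!arg)
			return -EINVAL;
		return sg_scsi_ioctl(file, q, disk, arg);
	default:
		return -ENOTTY;
	}
}
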

View File

@@ -28,4 +28,5 @@
#define BLIST_NO_ULD_ATTACH 0x100000 /* device is actually for RAID config */
#define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */
#define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */
#define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */
#endif
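
The new BLIST_MAX_512 flag follows the usual blacklist pattern: the scan code reads the flag and clamps queue limits before the device is exposed. A sketch of how such a flag is typically consumed (demo_apply_blist is illustrative; the consuming hunk is not shown here):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>

static void demo_apply_blist(struct scsi_device *sdev, unsigned int bflags)
{
	/* Device cannot address more than 512 sectors per command. */
	if (bflags & BLIST_MAX_512)
		blk_queue_max_sectors(sdev->request_queue, 512);
}
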

View File

@@ -41,8 +41,6 @@ typedef struct scsi_fctargaddress {
} Scsi_FCTargAddress;
extern int scsi_ioctl(struct scsi_device *, int, void __user *);
extern int scsi_ioctl_send_command(struct scsi_device *,
struct scsi_ioctl_command __user *);
extern int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
void __user *arg, struct file *filp);

View File

@@ -202,12 +202,19 @@ struct fc_rport { /* aka fc_starget_attrs */
/* internal data */
unsigned int channel;
u32 number;
u8 flags;
struct list_head peers;
struct device dev;
struct work_struct dev_loss_work;
struct work_struct scan_work;
struct work_struct stgt_delete_work;
struct work_struct rport_delete_work;
} __attribute__((aligned(sizeof(unsigned long))));
/* bit field values for struct fc_rport "flags" field: */
#define FC_RPORT_DEVLOSS_PENDING 0x01
#define FC_RPORT_SCAN_PENDING 0x02
#define dev_to_rport(d) \
container_of(d, struct fc_rport, dev)
#define transport_class_to_rport(classdev) \
@@ -327,13 +334,16 @@ struct fc_host_attrs {
struct list_head rport_bindings;
u32 next_rport_number;
u32 next_target_id;
u8 flags;
struct work_struct rport_del_work;
/* work queues for rport state manipulation */
char work_q_name[KOBJ_NAME_LEN];
struct workqueue_struct *work_q;
char devloss_work_q_name[KOBJ_NAME_LEN];
struct workqueue_struct *devloss_work_q;
};
/* values for struct fc_host_attrs "flags" field: */
#define FC_SHOST_RPORT_DEL_SCHEDULED 0x01
#define shost_to_fc_host(x) \
((struct fc_host_attrs *)(x)->shost_data)
#define fc_host_node_name(x) \
(((struct fc_host_attrs *)(x)->shost_data)->node_name)
@@ -375,10 +385,14 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->next_rport_number)
#define fc_host_next_target_id(x) \
(((struct fc_host_attrs *)(x)->shost_data)->next_target_id)
#define fc_host_flags(x) \
(((struct fc_host_attrs *)(x)->shost_data)->flags)
#define fc_host_rport_del_work(x) \
(((struct fc_host_attrs *)(x)->shost_data)->rport_del_work)
#define fc_host_work_q_name(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
#define fc_host_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q)
#define fc_host_devloss_work_q_name(x) \
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name)
#define fc_host_devloss_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
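
The new accessors pair with the flag bits defined earlier: a transition is marked pending under the host lock, then the corresponding work item is queued on the per-host queue. A hedged sketch (demo_kick_scan is illustrative, not transport-class code):

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static void demo_kick_scan(struct Scsi_Host *shost, struct fc_rport *rport)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	rport->flags |= FC_RPORT_SCAN_PENDING;
	spin_unlock_irqrestore(shost->host_lock, flags);

	queue_work(fc_host_work_q(shost), &rport->scan_work);
}
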
/* The functions by which the transport class and the driver communicate */
@@ -461,10 +475,15 @@ fc_remote_port_chkready(struct fc_rport *rport)
switch (rport->port_state) {
case FC_PORTSTATE_ONLINE:
result = 0;
if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
result = 0;
else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
result = DID_IMM_RETRY << 16;
else
result = DID_NO_CONNECT << 16;
break;
case FC_PORTSTATE_BLOCKED:
result = DID_BUS_BUSY << 16;
result = DID_IMM_RETRY << 16;
break;
default:
result = DID_NO_CONNECT << 16;
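
The reworked check above distinguishes a devloss-pending or blocked rport (DID_IMM_RETRY, i.e. retry later) from a missing target (DID_NO_CONNECT). Fibre Channel LLDDs are expected to gate every command on it; a hedged sketch of such a caller, using the 2.6-era queuecommand signature (demo_queuecommand is illustrative):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

static int demo_queuecommand(struct scsi_cmnd *cmd,
		void (*done)(struct scsi_cmnd *))
{
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	int ret = fc_remote_port_chkready(rport);

	if (ret) {
		cmd->result = ret;	/* e.g. DID_IMM_RETRY << 16 */
		done(cmd);
		return 0;
	}
	/* ... hand the command to the hardware ... */
	return 0;
}
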