SCSI misc on 20220324
This series consists of the usual driver updates (qla2xxx, pm8001, libsas,
smartpqi, scsi_debug, lpfc, iscsi, mpi3mr) plus minor updates and bug fixes.
The high blast radius core update is the removal of write same, which affects
block and several non-SCSI devices. The other big change, which is more local,
is the removal of the SCSI pointer.

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYjzDQyYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishQMYAQDEWUGV
6U0+736AHVtOfiMNfiRN79B1HfXVoHvemnPcTwD/UlndwFfy/3GGOtoZmqEpc73J
Ec1HDuUCE18H1H2QAh0=
=/Ty9
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (qla2xxx, pm8001,
  libsas, smartpqi, scsi_debug, lpfc, iscsi, mpi3mr) plus minor updates
  and bug fixes.

  The high blast radius core update is the removal of write same, which
  affects block and several non-SCSI devices. The other big change,
  which is more local, is the removal of the SCSI pointer"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (281 commits)
  scsi: scsi_ioctl: Drop needless assignment in sg_io()
  scsi: bsg: Drop needless assignment in scsi_bsg_sg_io_fn()
  scsi: lpfc: Copyright updates for 14.2.0.0 patches
  scsi: lpfc: Update lpfc version to 14.2.0.0
  scsi: lpfc: SLI path split: Refactor BSG paths
  scsi: lpfc: SLI path split: Refactor Abort paths
  scsi: lpfc: SLI path split: Refactor SCSI paths
  scsi: lpfc: SLI path split: Refactor CT paths
  scsi: lpfc: SLI path split: Refactor misc ELS paths
  scsi: lpfc: SLI path split: Refactor VMID paths
  scsi: lpfc: SLI path split: Refactor FDISC paths
  scsi: lpfc: SLI path split: Refactor LS_RJT paths
  scsi: lpfc: SLI path split: Refactor LS_ACC paths
  scsi: lpfc: SLI path split: Refactor the RSCN/SCR/RDF/EDC/FARPR paths
  scsi: lpfc: SLI path split: Refactor PLOGI/PRLI/ADISC/LOGO paths
  scsi: lpfc: SLI path split: Refactor base ELS paths and the FLOGI path
  scsi: lpfc: SLI path split: Introduce lpfc_prep_wqe
  scsi: lpfc: SLI path split: Refactor fast and slow paths to native SLI4
  scsi: lpfc: SLI path split: Refactor lpfc_iocbq
  scsi: lpfc: Use kcalloc()
  ...
This commit is contained in: commit 6f2689a766
@@ -207,7 +207,6 @@ Management Functions (TMFs) described in SAM::

     /* Task Management Functions. Must be called from process context. */
     int (*lldd_abort_task)(struct sas_task *);
     int (*lldd_abort_task_set)(struct domain_device *, u8 *lun);
-    int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
     int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
     int (*lldd_I_T_nexus_reset)(struct domain_device *);
     int (*lldd_lu_reset)(struct domain_device *, u8 *lun);

@@ -262,7 +261,6 @@ can look like this (called last thing from probe())

     my_ha->sas_ha.lldd_abort_task = my_abort_task;
     my_ha->sas_ha.lldd_abort_task_set = my_abort_task_set;
-    my_ha->sas_ha.lldd_clear_aca = my_clear_aca;
     my_ha->sas_ha.lldd_clear_task_set = my_clear_task_set;
     my_ha->sas_ha.lldd_I_T_nexus_reset= NULL; (2)
     my_ha->sas_ha.lldd_lu_reset = my_lu_reset;
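For orientation, the TMF hooks in the table above follow one convention: run in
process context and return a libsas TMF response code. A minimal sketch of one
such hook, where my_hw_send_lu_reset() is a hypothetical hardware call, not
part of this diff:

    #include <scsi/libsas.h>

    /* Sketch of an lldd_lu_reset implementation; must be callable from
     * process context, per the documentation above. */
    static int my_lu_reset(struct domain_device *dev, u8 *lun)
    {
            if (my_hw_send_lu_reset(dev, lun))      /* hypothetical helper */
                    return TMF_RESP_FUNC_FAILED;
            return TMF_RESP_FUNC_COMPLETE;
    }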
@@ -95,19 +95,18 @@ function

  - BLK_EH_RESET_TIMER
      This indicates that more time is required to finish the
-     command. Timer is restarted. This action is counted as a
-     retry and only allowed scmd->allowed + 1(!) times. Once the
-     limit is reached, action for BLK_EH_DONE is taken instead.
+     command. Timer is restarted.

  - BLK_EH_DONE
      eh_timed_out() callback did not handle the command.
      Step #2 is taken.

-2. scsi_abort_command() is invoked to schedule an asynchrous abort.
-   Asynchronous abort are not invoked for commands which the
-   SCSI_EH_ABORT_SCHEDULED flag is set (this indicates that the command
-   already had been aborted once, and this is a retry which failed),
-   or when the EH deadline is expired. In these case Step #3 is taken.
+2. scsi_abort_command() is invoked to schedule an asynchronous abort which may
+   issue a retry scmd->allowed + 1 times. Asynchronous aborts are not invoked
+   for commands for which the SCSI_EH_ABORT_SCHEDULED flag is set (this
+   indicates that the command already had been aborted once, and this is a
+   retry which failed), when retries are exceeded, or when the EH deadline is
+   expired. In these cases Step #3 is taken.

 3. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
    command. See [1-4] for more information.
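The BLK_EH_RESET_TIMER / BLK_EH_DONE contract documented above is implemented
by a host template's eh_timed_out() callback. A minimal sketch, assuming a
hypothetical my_cmd_still_in_flight() helper (not from this diff):

    #include <linux/blk-mq.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static enum blk_eh_timer_return my_eh_timed_out(struct scsi_cmnd *scmd)
    {
            /* Hardware still owns the command: rearm the timer. Note
             * that with this change the retry budget is enforced by
             * scsi_abort_command(), not by this callback. */
            if (my_cmd_still_in_flight(scmd))       /* hypothetical */
                    return BLK_EH_RESET_TIMER;

            /* Not handled here: step #2 (scsi_abort_command()) runs next. */
            return BLK_EH_DONE;
    }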
@@ -10,8 +10,8 @@ Universal Flash Storage

 1. Overview
 2. UFS Architecture Overview
      2.1 Application Layer
-     2.2 UFS Transport Protocol(UTP) layer
-     2.3 UFS Interconnect(UIC) Layer
+     2.2 UFS Transport Protocol (UTP) layer
+     2.3 UFS Interconnect (UIC) Layer
 3. UFSHCD Overview
      3.1 UFS controller initialization
      3.2 UTP Transfer requests

@@ -22,15 +22,15 @@ Universal Flash Storage

 1. Overview
 ===========

-Universal Flash Storage(UFS) is a storage specification for flash devices.
-It is aimed to provide a universal storage interface for both
-embedded and removable flash memory based storage in mobile
+Universal Flash Storage (UFS) is a storage specification for flash devices.
+It aims to provide a universal storage interface for both
+embedded and removable flash memory-based storage in mobile
 devices such as smart phones and tablet computers. The specification
 is defined by JEDEC Solid State Technology Association. UFS is based
-on MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
+on the MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
 physical layer and MIPI Unipro as the link layer.

-The main goals of UFS is to provide:
+The main goals of UFS are to provide:

 * Optimized performance:

@@ -53,17 +53,17 @@ The main goals of UFS is to provide:

 UFS has a layered communication architecture which is based on SCSI
 SAM-5 architectural model.

-UFS communication architecture consists of following layers,
+UFS communication architecture consists of the following layers.

 2.1 Application Layer
 ---------------------

-The Application layer is composed of UFS command set layer(UCS),
+The Application layer is composed of the UFS command set layer (UCS),
 Task Manager and Device manager. The UFS interface is designed to be
 protocol agnostic, however SCSI has been selected as a baseline
-protocol for versions 1.0 and 1.1 of UFS protocol layer.
+protocol for versions 1.0 and 1.1 of the UFS protocol layer.

-UFS supports subset of SCSI commands defined by SPC-4 and SBC-3.
+UFS supports a subset of SCSI commands defined by SPC-4 and SBC-3.

 * UCS:
   It handles SCSI commands supported by UFS specification.

@@ -78,10 +78,10 @@ UFS communication architecture consists of following layers,
   requests which are used to modify and retrieve configuration
   information of the device.

-2.2 UFS Transport Protocol(UTP) layer
--------------------------------------
+2.2 UFS Transport Protocol (UTP) layer
+--------------------------------------

-UTP layer provides services for
+The UTP layer provides services for
 the higher layers through Service Access Points. UTP defines 3
 service access points for higher layers.

@@ -89,19 +89,19 @@ UFS communication architecture consists of following layers,
   manager for device level operations. These device level operations
   are done through query requests.
 * UTP_CMD_SAP: Command service access point is exposed to UFS command
-  set layer(UCS) to transport commands.
+  set layer (UCS) to transport commands.
 * UTP_TM_SAP: Task management service access point is exposed to task
   manager to transport task management functions.

-UTP transports messages through UFS protocol information unit(UPIU).
+UTP transports messages through UFS protocol information unit (UPIU).

-2.3 UFS Interconnect(UIC) Layer
--------------------------------
+2.3 UFS Interconnect (UIC) Layer
+--------------------------------

-UIC is the lowest layer of UFS layered architecture. It handles
-connection between UFS host and UFS device. UIC consists of
+UIC is the lowest layer of the UFS layered architecture. It handles
+the connection between UFS host and UFS device. UIC consists of
 MIPI UniPro and MIPI M-PHY. UIC provides 2 service access points
-to upper layer,
+to upper layer:

 * UIC_SAP: To transport UPIU between UFS host and UFS device.
 * UIO_SAP: To issue commands to Unipro layers.

@@ -110,25 +110,25 @@ UFS communication architecture consists of following layers,

 3. UFSHCD Overview
 ==================

-The UFS host controller driver is based on Linux SCSI Framework.
-UFSHCD is a low level device driver which acts as an interface between
-SCSI Midlayer and PCIe based UFS host controllers.
+The UFS host controller driver is based on the Linux SCSI Framework.
+UFSHCD is a low-level device driver which acts as an interface between
+the SCSI Midlayer and PCIe-based UFS host controllers.

-The current UFSHCD implementation supports following functionality,
+The current UFSHCD implementation supports the following functionality:

 3.1 UFS controller initialization
 ---------------------------------

-The initialization module brings UFS host controller to active state
-and prepares the controller to transfer commands/response between
+The initialization module brings the UFS host controller to active state
+and prepares the controller to transfer commands/responses between
 UFSHCD and UFS device.

 3.2 UTP Transfer requests
 -------------------------

 Transfer request handling module of UFSHCD receives SCSI commands
-from SCSI Midlayer, forms UPIUs and issues the UPIUs to UFS Host
-controller. Also, the module decodes, responses received from UFS
+from the SCSI Midlayer, forms UPIUs and issues the UPIUs to the UFS Host
+controller. Also, the module decodes responses received from the UFS
 host controller in the form of UPIUs and intimates the SCSI Midlayer
 of the status of the command.

@@ -136,19 +136,19 @@ The current UFSHCD implementation supports following functionality,
 ----------------------

 Error handling module handles Host controller fatal errors,
-Device fatal errors and UIC interconnect layer related errors.
+Device fatal errors and UIC interconnect layer-related errors.

 3.4 SCSI Error handling
 -----------------------

 This is done through UFSHCD SCSI error handling routines registered
-with SCSI Midlayer. Examples of some of the error handling commands
-issues by SCSI Midlayer are Abort task, Lun reset and host reset.
+with the SCSI Midlayer. Examples of some of the error handling commands
+issues by the SCSI Midlayer are Abort task, LUN reset and host reset.
 UFSHCD Routines to perform these tasks are registered with
 SCSI Midlayer through .eh_abort_handler, .eh_device_reset_handler and
 .eh_host_reset_handler.

-In this version of UFSHCD Query requests and power management
+In this version of UFSHCD, Query requests and power management
 functionality are not implemented.

 4. BSG Support

@@ -182,14 +182,14 @@ If you wish to read or write a descriptor, use the appropriate xferp of
 sg_io_v4.

 The userspace tool that interacts with the ufs-bsg endpoint and uses its
-upiu-based protocol is available at:
+UPIU-based protocol is available at:

     https://github.com/westerndigitalcorporation/ufs-tool

 For more detailed information about the tool and its supported
 features, please see the tool's README.

-UFS Specifications can be found at:
+UFS specifications can be found at:

 - UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
 - UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
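To make the BSG section above concrete, here is a userspace sketch of driving
a ufs-bsg node through the sg_io_v4 interface mentioned in the documentation.
The device path, UPIU buffers and error handling are illustrative assumptions,
not taken from this diff:

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/bsg.h>
    #include <scsi/sg.h>

    /* Send a request UPIU and receive the response UPIU over BSG. */
    static int send_upiu(const char *bsg_path,
                         void *req, unsigned int req_len,
                         void *rsp, unsigned int rsp_len)
    {
            struct sg_io_v4 io;
            int fd, ret;

            fd = open(bsg_path, O_RDWR);        /* e.g. /dev/bsg/ufs-bsg0 */
            if (fd < 0)
                    return -1;

            memset(&io, 0, sizeof(io));
            io.guard = 'Q';
            io.protocol = BSG_PROTOCOL_SCSI;
            io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
            io.request = (uint64_t)(uintptr_t)req;      /* request UPIU */
            io.request_len = req_len;
            io.response = (uint64_t)(uintptr_t)rsp;     /* response UPIU */
            io.max_response_len = rsp_len;
            /* descriptor reads/writes would set din_xferp/dout_xferp here */

            ret = ioctl(fd, SG_IO, &io);
            close(fd);
            return ret;
    }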
@@ -123,7 +123,6 @@ static const char *const blk_op_name[] = {
     REQ_OP_NAME(ZONE_CLOSE),
     REQ_OP_NAME(ZONE_FINISH),
     REQ_OP_NAME(ZONE_APPEND),
-    REQ_OP_NAME(WRITE_SAME),
     REQ_OP_NAME(WRITE_ZEROES),
     REQ_OP_NAME(DRV_IN),
     REQ_OP_NAME(DRV_OUT),

@@ -828,10 +827,6 @@ void submit_bio_noacct(struct bio *bio)
         if (!blk_queue_secure_erase(q))
             goto not_supported;
         break;
-    case REQ_OP_WRITE_SAME:
-        if (!q->limits.max_write_same_sectors)
-            goto not_supported;
-        break;
     case REQ_OP_ZONE_APPEND:
         status = blk_check_zone_append(q, bio);
         if (status != BLK_STS_OK)

@@ -903,13 +898,7 @@ void submit_bio(struct bio *bio)
      * go through the normal accounting stuff before submission.
      */
     if (bio_has_data(bio)) {
-        unsigned int count;
-
-        if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-            count = queue_logical_block_size(
-                    bdev_get_queue(bio->bi_bdev)) >> 9;
-        else
-            count = bio_sectors(bio);
+        unsigned int count = bio_sectors(bio);

         if (op_is_write(bio_op(bio))) {
             count_vm_events(PGPGOUT, count);
@@ -132,94 +132,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_discard);

-/**
- * __blkdev_issue_write_same - generate number of bios with same page
- * @bdev: target blockdev
- * @sector: start sector
- * @nr_sects: number of sectors to write
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @page: page containing data to write
- * @biop: pointer to anchor bio
- *
- * Description:
- *  Generate and issue number of bios(REQ_OP_WRITE_SAME) with same page.
- */
-static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
-        sector_t nr_sects, gfp_t gfp_mask, struct page *page,
-        struct bio **biop)
-{
-    struct request_queue *q = bdev_get_queue(bdev);
-    unsigned int max_write_same_sectors;
-    struct bio *bio = *biop;
-    sector_t bs_mask;
-
-    if (bdev_read_only(bdev))
-        return -EPERM;
-
-    bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-    if ((sector | nr_sects) & bs_mask)
-        return -EINVAL;
-
-    if (!bdev_write_same(bdev))
-        return -EOPNOTSUPP;
-
-    /* Ensure that max_write_same_sectors doesn't overflow bi_size */
-    max_write_same_sectors = bio_allowed_max_sectors(q);
-
-    while (nr_sects) {
-        bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
-        bio->bi_iter.bi_sector = sector;
-        bio->bi_vcnt = 1;
-        bio->bi_io_vec->bv_page = page;
-        bio->bi_io_vec->bv_offset = 0;
-        bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
-
-        if (nr_sects > max_write_same_sectors) {
-            bio->bi_iter.bi_size = max_write_same_sectors << 9;
-            nr_sects -= max_write_same_sectors;
-            sector += max_write_same_sectors;
-        } else {
-            bio->bi_iter.bi_size = nr_sects << 9;
-            nr_sects = 0;
-        }
-        cond_resched();
-    }
-
-    *biop = bio;
-    return 0;
-}
-
-/**
- * blkdev_issue_write_same - queue a write same operation
- * @bdev: target blockdev
- * @sector: start sector
- * @nr_sects: number of sectors to write
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @page: page containing data
- *
- * Description:
- *    Issue a write same request for the sectors in question.
- */
-int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
-                sector_t nr_sects, gfp_t gfp_mask,
-                struct page *page)
-{
-    struct bio *bio = NULL;
-    struct blk_plug plug;
-    int ret;
-
-    blk_start_plug(&plug);
-    ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
-            &bio);
-    if (ret == 0 && bio) {
-        ret = submit_bio_wait(bio);
-        bio_put(bio);
-    }
-    blk_finish_plug(&plug);
-    return ret;
-}
-EXPORT_SYMBOL(blkdev_issue_write_same);
-
 static int __blkdev_issue_write_zeroes(struct block_device *bdev,
         sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
         struct bio **biop, unsigned flags)
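With __blkdev_issue_write_same() and blkdev_issue_write_same() deleted above,
the closest surviving helper for the common zero-fill case is
blkdev_issue_zeroout(). A minimal sketch of the substitution — an assumption
about how a remaining caller would migrate, not code from this series:

    #include <linux/blkdev.h>

    /* Zero nr_sects sectors starting at sector; the block layer picks
     * WRITE ZEROES, unmap, or a manual zero-page loop as appropriate. */
    static int zero_range(struct block_device *bdev, sector_t sector,
                          sector_t nr_sects)
    {
            return blkdev_issue_zeroout(bdev, sector, nr_sects,
                                        GFP_KERNEL, 0);
    }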
@@ -153,22 +153,6 @@ static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
     return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
 }

-static struct bio *blk_bio_write_same_split(struct request_queue *q,
-                    struct bio *bio,
-                    struct bio_set *bs,
-                    unsigned *nsegs)
-{
-    *nsegs = 1;
-
-    if (!q->limits.max_write_same_sectors)
-        return NULL;
-
-    if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
-        return NULL;
-
-    return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
-}
-
 /*
  * Return the maximum number of sectors from the start of a bio that may be
  * submitted as a single request to a block device. If enough sectors remain,

@@ -352,10 +336,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
         split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
                 nr_segs);
         break;
-    case REQ_OP_WRITE_SAME:
-        split = blk_bio_write_same_split(q, *bio, &q->bio_split,
-                nr_segs);
-        break;
     default:
         split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
         break;

@@ -415,8 +395,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
         return 1;
     case REQ_OP_WRITE_ZEROES:
         return 0;
-    case REQ_OP_WRITE_SAME:
-        return 1;
     }

     rq_for_each_bvec(bv, rq, iter)

@@ -554,8 +532,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,

     if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
         nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
-    else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
-        nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
     else if (rq->bio)
         nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

@@ -762,13 +738,6 @@ static enum elv_merge blk_try_req_merge(struct request *req,
     return ELEVATOR_NO_MERGE;
 }

-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
-    if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
-        return true;
-    return false;
-}
-
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.

@@ -785,10 +754,6 @@ static struct request *attempt_merge(struct request_queue *q,
     if (rq_data_dir(req) != rq_data_dir(next))
         return NULL;

-    if (req_op(req) == REQ_OP_WRITE_SAME &&
-        !blk_write_same_mergeable(req->bio, next->bio))
-        return NULL;
-
     /*
      * Don't allow merge of different write hints, or for a hint with
      * non-hint IO.

@@ -921,11 +886,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
     if (!bio_crypt_rq_ctx_compatible(rq, bio))
         return false;

-    /* must be using the same buffer */
-    if (req_op(rq) == REQ_OP_WRITE_SAME &&
-        !blk_write_same_mergeable(rq->bio, bio))
-        return false;
-
     /*
      * Don't allow merge of different write hints, or for a hint with
      * non-hint IO.
@@ -42,7 +42,6 @@ void blk_set_default_limits(struct queue_limits *lim)
     lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
     lim->max_dev_sectors = 0;
     lim->chunk_sectors = 0;
-    lim->max_write_same_sectors = 0;
     lim->max_write_zeroes_sectors = 0;
     lim->max_zone_append_sectors = 0;
    lim->max_discard_sectors = 0;

@@ -79,7 +78,6 @@ void blk_set_stacking_limits(struct queue_limits *lim)
     lim->max_segment_size = UINT_MAX;
     lim->max_sectors = UINT_MAX;
     lim->max_dev_sectors = UINT_MAX;
-    lim->max_write_same_sectors = UINT_MAX;
     lim->max_write_zeroes_sectors = UINT_MAX;
     lim->max_zone_append_sectors = UINT_MAX;
 }

@@ -178,18 +176,6 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);

-/**
- * blk_queue_max_write_same_sectors - set max sectors for a single write same
- * @q:  the request queue for the device
- * @max_write_same_sectors: maximum number of sectors to write per command
- **/
-void blk_queue_max_write_same_sectors(struct request_queue *q,
-                      unsigned int max_write_same_sectors)
-{
-    q->limits.max_write_same_sectors = max_write_same_sectors;
-}
-EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
-
 /**
  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
  *                                      write zeroes

@@ -519,8 +505,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
     t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
     t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
     t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
-    t->max_write_same_sectors = min(t->max_write_same_sectors,
-                    b->max_write_same_sectors);
     t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                     b->max_write_zeroes_sectors);
     t->max_zone_append_sectors = min(t->max_zone_append_sectors,
@@ -214,8 +214,7 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)

 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
 {
-    return sprintf(page, "%llu\n",
-        (unsigned long long)q->limits.max_write_same_sectors << 9);
+    return queue_var_show(0, page);
 }

 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
@@ -65,7 +65,6 @@ bool blk_req_needs_zone_write_lock(struct request *rq)

     switch (req_op(rq)) {
     case REQ_OP_WRITE_ZEROES:
-    case REQ_OP_WRITE_SAME:
     case REQ_OP_WRITE:
         return blk_rq_zone_is_seq(rq);
     default:
@@ -286,7 +286,6 @@ static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
     case REQ_OP_DISCARD:
     case REQ_OP_SECURE_ERASE:
     case REQ_OP_WRITE_ZEROES:
-    case REQ_OP_WRITE_SAME:
         return true; /* non-trivial splitting decisions */
     default:
         break;
@@ -178,9 +178,6 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
     case REQ_OP_SECURE_ERASE:
     case REQ_OP_WRITE_ZEROES:
         break;
-    case REQ_OP_WRITE_SAME:
-        bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-        break;
     default:
         bio_for_each_segment(bv, bio_src, iter)
             bio->bi_io_vec[bio->bi_vcnt++] = bv;
@@ -1022,7 +1022,9 @@ void ata_scsi_sdev_config(struct scsi_device *sdev)
  */
 bool ata_scsi_dma_need_drain(struct request *rq)
 {
-    return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
+    struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+
+    return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC;
 }
 EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
@@ -912,7 +912,7 @@ assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
         p->qlim->io_min = cpu_to_be32(queue_io_min(q));
         p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
         p->qlim->discard_enabled = blk_queue_discard(q);
-        p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
+        p->qlim->write_same_capable = 0;
     } else {
         q = device->rq_queue;
         p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));

@@ -1591,9 +1591,6 @@ static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
              ? 0 : MSG_MORE);
         if (err)
             return err;
-        /* REQ_OP_WRITE_SAME has only one segment */
-        if (bio_op(bio) == REQ_OP_WRITE_SAME)
-            break;
     }
     return 0;
 }

@@ -1612,9 +1609,6 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
                   bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
         if (err)
             return err;
-        /* REQ_OP_WRITE_SAME has only one segment */
-        if (bio_op(bio) == REQ_OP_WRITE_SAME)
-            break;
     }
     return 0;
 }

@@ -1646,7 +1640,6 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
         return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
             (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
             (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
-            (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
             (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
             (bio_op(bio) == REQ_OP_WRITE_ZEROES ?
               ((connection->agreed_features & DRBD_FF_WZEROES) ?

@@ -1665,7 +1658,6 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
     struct drbd_device *device = peer_device->device;
     struct drbd_socket *sock;
     struct p_data *p;
-    struct p_wsame *wsame = NULL;
     void *digest_out;
     unsigned int dp_flags = 0;
     int digest_size;

@@ -1703,29 +1695,14 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
         err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
         goto out;
     }
-    if (dp_flags & DP_WSAME) {
-        /* this will only work if DRBD_FF_WSAME is set AND the
-         * handshake agreed that all nodes and backend devices are
-         * WRITE_SAME capable and agree on logical_block_size */
-        wsame = (struct p_wsame*)p;
-        digest_out = wsame + 1;
-        wsame->size = cpu_to_be32(req->i.size);
-    } else
-        digest_out = p + 1;
+    digest_out = p + 1;

     /* our digest is still only over the payload.
      * TRIM does not carry any payload. */
     if (digest_size)
         drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
-    if (wsame) {
-        err =
-            __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
-                   sizeof(*wsame) + digest_size, NULL,
-                   bio_iovec(req->master_bio).bv_len);
-    } else
-        err =
-            __send_command(peer_device->connection, device->vnr, sock, P_DATA,
-                   sizeof(*p) + digest_size, NULL, req->i.size);
+    err = __send_command(peer_device->connection, device->vnr, sock, P_DATA,
+                 sizeof(*p) + digest_size, NULL, req->i.size);
     if (!err) {
         /* For protocol A, we have to memcpy the payload into
          * socket buffers, as we may complete right away
@@ -1265,71 +1265,6 @@ static void fixup_write_zeroes(struct drbd_device *device, struct request_queue
         q->limits.max_write_zeroes_sectors = 0;
 }

-static void decide_on_write_same_support(struct drbd_device *device,
-        struct request_queue *q,
-        struct request_queue *b, struct o_qlim *o,
-        bool disable_write_same)
-{
-    struct drbd_peer_device *peer_device = first_peer_device(device);
-    struct drbd_connection *connection = peer_device->connection;
-    bool can_do = b ? b->limits.max_write_same_sectors : true;
-
-    if (can_do && disable_write_same) {
-        can_do = false;
-        drbd_info(peer_device, "WRITE_SAME disabled by config\n");
-    }
-
-    if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
-        can_do = false;
-        drbd_info(peer_device, "peer does not support WRITE_SAME\n");
-    }
-
-    if (o) {
-        /* logical block size; queue_logical_block_size(NULL) is 512 */
-        unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
-        unsigned int me_lbs_b = queue_logical_block_size(b);
-        unsigned int me_lbs = queue_logical_block_size(q);
-
-        if (me_lbs_b != me_lbs) {
-            drbd_warn(device,
-                "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
-                me_lbs, me_lbs_b);
-            /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
-            can_do = false;
-        }
-        if (me_lbs_b != peer_lbs) {
-            drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
-                me_lbs, peer_lbs);
-            if (can_do) {
-                drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
-                can_do = false;
-            }
-            me_lbs = max(me_lbs, me_lbs_b);
-            /* We cannot change the logical block size of an in-use queue.
-             * We can only hope that access happens to be properly aligned.
-             * If not, the peer will likely produce an IO error, and detach. */
-            if (peer_lbs > me_lbs) {
-                if (device->state.role != R_PRIMARY) {
-                    blk_queue_logical_block_size(q, peer_lbs);
-                    drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
-                } else {
-                    drbd_warn(peer_device,
-                        "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
-                        me_lbs, peer_lbs);
-                }
-            }
-        }
-        if (can_do && !o->write_same_capable) {
-            /* If we introduce an open-coded write-same loop on the receiving side,
-             * the peer would present itself as "capable". */
-            drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
-            can_do = false;
-        }
-    }
-
-    blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
-}
-
 static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
                    unsigned int max_bio_size, struct o_qlim *o)
 {

@@ -1339,7 +1274,6 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
     struct request_queue *b = NULL;
     struct disk_conf *dc;
     bool discard_zeroes_if_aligned = true;
-    bool disable_write_same = false;

     if (bdev) {
         b = bdev->backing_bdev->bd_disk->queue;

@@ -1349,7 +1283,6 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
         dc = rcu_dereference(device->ldev->disk_conf);
         max_segments = dc->max_bio_bvecs;
         discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
-        disable_write_same = dc->disable_write_same;
         rcu_read_unlock();

         blk_set_stacking_limits(&q->limits);

@@ -1360,7 +1293,6 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
     blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
     blk_queue_segment_boundary(q, PAGE_SIZE-1);
     decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
-    decide_on_write_same_support(device, q, b, o, disable_write_same);

     if (b) {
         blk_stack_limits(&q->limits, &b->limits, 0);

@@ -1666,8 +1598,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
     if (write_ordering_changed(old_disk_conf, new_disk_conf))
         drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

-    if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
-    ||  old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
+    if (old_disk_conf->discard_zeroes_if_aligned !=
+        new_disk_conf->discard_zeroes_if_aligned)
         drbd_reconsider_queue_parameters(device, device->ldev, NULL);

     drbd_md_sync(device);
@@ -1604,19 +1604,7 @@ static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, stru
     drbd_endio_write_sec_final(peer_req);
 }

-static void drbd_issue_peer_wsame(struct drbd_device *device,
-                  struct drbd_peer_request *peer_req)
-{
-    struct block_device *bdev = device->ldev->backing_bdev;
-    sector_t s = peer_req->i.sector;
-    sector_t nr = peer_req->i.size >> 9;
-    if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
-        peer_req->flags |= EE_WAS_ERROR;
-    drbd_endio_write_sec_final(peer_req);
-}
-
-
-/*
+/**
  * drbd_submit_peer_request()
  * @device: DRBD device.
  * @peer_req: peer request

@@ -1651,7 +1639,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
      * Correctness first, performance later.  Next step is to code an
      * asynchronous variant of the same.
      */
-    if (peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) {
+    if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) {
         /* wait for all pending IO completions, before we start
          * zeroing things out. */
         conn_wait_active_ee_empty(peer_req->peer_device->connection);

@@ -1668,10 +1656,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
             spin_unlock_irq(&device->resource->req_lock);
         }

-        if (peer_req->flags & (EE_TRIM|EE_ZEROOUT))
-            drbd_issue_peer_discard_or_zero_out(device, peer_req);
-        else /* EE_WRITE_SAME */
-            drbd_issue_peer_wsame(device, peer_req);
+        drbd_issue_peer_discard_or_zero_out(device, peer_req);
         return 0;
     }

@@ -1854,7 +1839,6 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
     unsigned long *data;
     struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
     struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL;
-    struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;

     digest_size = 0;
     if (!trim && peer_device->connection->peer_integrity_tfm) {

@@ -1869,7 +1853,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
         data_size -= digest_size;
     }

-    /* assume request_size == data_size, but special case trim and wsame. */
+    /* assume request_size == data_size, but special case trim. */
     ds = data_size;
     if (trim) {
         if (!expect(data_size == 0))

@@ -1879,23 +1863,11 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
         if (!expect(data_size == 0))
             return NULL;
         ds = be32_to_cpu(zeroes->size);
-    } else if (wsame) {
-        if (data_size != queue_logical_block_size(device->rq_queue)) {
-            drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
-                data_size, queue_logical_block_size(device->rq_queue));
-            return NULL;
-        }
-        if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
-            drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
-                data_size, bdev_logical_block_size(device->ldev->backing_bdev));
-            return NULL;
-        }
-        ds = be32_to_cpu(wsame->size);
     }

     if (!expect(IS_ALIGNED(ds, 512)))
         return NULL;
-    if (trim || wsame || zeroes) {
+    if (trim || zeroes) {
         if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
             return NULL;
     } else if (!expect(ds <= DRBD_MAX_BIO_SIZE))

@@ -1927,8 +1899,6 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
         peer_req->flags |= EE_ZEROOUT;
         return peer_req;
     }
-    if (wsame)
-        peer_req->flags |= EE_WRITE_SAME;

     /* receive payload size bytes into page chain */
     ds = data_size;

@@ -2427,8 +2397,6 @@ static unsigned long wire_flags_to_bio_op(u32 dpf)
         return REQ_OP_WRITE_ZEROES;
     if (dpf & DP_DISCARD)
         return REQ_OP_DISCARD;
-    if (dpf & DP_WSAME)
-        return REQ_OP_WRITE_SAME;
     else
         return REQ_OP_WRITE;
 }

@@ -2695,11 +2663,11 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
         update_peer_seq(peer_device, peer_seq);
         spin_lock_irq(&device->resource->req_lock);
     }
-    /* TRIM and WRITE_SAME are processed synchronously,
+    /* TRIM and is processed synchronously,
      * we wait for all pending requests, respectively wait for
      * active_ee to become empty in drbd_submit_peer_request();
      * better not add ourselves here. */
-    if ((peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) == 0)
+    if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0)
         list_add_tail(&peer_req->w.list, &device->active_ee);
     spin_unlock_irq(&device->resource->req_lock);

@@ -5068,7 +5036,6 @@ static struct data_cmd drbd_cmd_handler[] = {
     [P_TRIM]            = { 0, sizeof(struct p_trim), receive_Data },
     [P_ZEROES]          = { 0, sizeof(struct p_trim), receive_Data },
     [P_RS_DEALLOCATED]  = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
-    [P_WSAME]           = { 1, sizeof(struct p_wsame), receive_Data },
 };

 static void drbdd(struct drbd_connection *connection)
@@ -36,7 +36,6 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
     req->private_bio->bi_end_io = drbd_request_endio;

     req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
-              | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
               | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
               | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
     req->device = device;
@@ -329,11 +329,6 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
         src = bvec_kmap_local(&bvec);
         crypto_shash_update(desc, src, bvec.bv_len);
         kunmap_local(src);
-
-        /* REQ_OP_WRITE_SAME has only one segment,
-         * checksum the payload only once. */
-        if (bio_op(bio) == REQ_OP_WRITE_SAME)
-            break;
     }
     crypto_shash_final(desc, digest);
     shash_desc_zero(desc);
@@ -693,6 +693,7 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 {
     struct request_queue *q = bdev_get_queue(pd->bdev);
+    struct scsi_cmnd *scmd;
     struct request *rq;
     int ret = 0;

@@ -700,6 +701,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
                  REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
     if (IS_ERR(rq))
         return PTR_ERR(rq);
+    scmd = blk_mq_rq_to_pdu(rq);

     if (cgc->buflen) {
         ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,

@@ -708,15 +710,15 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
             goto out;
     }

-    scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
-    memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+    scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+    memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);

     rq->timeout = 60*HZ;
     if (cgc->quiet)
         rq->rq_flags |= RQF_QUIET;

     blk_execute_rq(rq, false);
-    if (scsi_req(rq)->result)
+    if (scmd->result)
         ret = -EIO;
 out:
     blk_mq_free_request(rq);
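The pkt_generic_packet() hunk above shows the recurring cleanup pattern in
this series: passthrough command state now lives in the request PDU rather
than a separate struct scsi_request. Condensed into one hedged sketch
(scsi_alloc_request() is the allocator used by the converted callers; error
paths are abbreviated):

    #include <linux/blk-mq.h>
    #include <scsi/scsi_cmnd.h>

    /* Issue a no-data CDB on a SCSI request_queue, post-conversion style. */
    static int issue_cdb(struct request_queue *q, const u8 *cdb)
    {
            struct request *rq;
            struct scsi_cmnd *scmd;
            int ret;

            rq = scsi_alloc_request(q, REQ_OP_DRV_IN, 0);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);

            scmd = blk_mq_rq_to_pdu(rq);    /* command state is in the PDU */
            scmd->cmd_len = COMMAND_SIZE(cdb[0]);
            memcpy(scmd->cmnd, cdb, scmd->cmd_len);
            rq->timeout = 60 * HZ;

            blk_execute_rq(rq, false);
            ret = scmd->result ? -EIO : 0;  /* result also lives in the PDU */
            blk_mq_free_request(rq);
            return ret;
    }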
@@ -79,7 +79,6 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
     dev->nsectors = le64_to_cpu(rsp->nsectors);
     dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
     dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
-    dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
     dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
     dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
     dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);

@@ -1355,8 +1354,6 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
     blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
     blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
     blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
-    blk_queue_max_write_same_sectors(dev->queue,
-                     dev->max_write_same_sectors);

     /*
      * we don't support discards to "discontiguous" segments

@@ -1606,10 +1603,10 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
     }

     rnbd_clt_info(dev,
-              "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+              "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
               dev->gd->disk_name, dev->nsectors,
               dev->logical_block_size, dev->physical_block_size,
-              dev->max_write_same_sectors, dev->max_discard_sectors,
+              dev->max_discard_sectors,
               dev->discard_granularity, dev->discard_alignment,
               dev->secure_discard, dev->max_segments,
               dev->max_hw_sectors, dev->wc, dev->fua);
@@ -121,7 +121,6 @@ struct rnbd_clt_dev {
     bool            wc;
     bool            fua;
     u32             max_hw_sectors;
-    u32             max_write_same_sectors;
     u32             max_discard_sectors;
     u32             discard_granularity;
     u32             discard_alignment;
@@ -249,9 +249,6 @@ static inline u32 rnbd_to_bio_flags(u32 rnbd_opf)
     case RNBD_OP_SECURE_ERASE:
         bio_opf = REQ_OP_SECURE_ERASE;
         break;
-    case RNBD_OP_WRITE_SAME:
-        bio_opf = REQ_OP_WRITE_SAME;
-        break;
     default:
         WARN(1, "Unknown RNBD type: %d (flags %d)\n",
              rnbd_op(rnbd_opf), rnbd_opf);

@@ -284,9 +281,6 @@ static inline u32 rq_to_rnbd_flags(struct request *rq)
     case REQ_OP_SECURE_ERASE:
         rnbd_opf = RNBD_OP_SECURE_ERASE;
         break;
-    case REQ_OP_WRITE_SAME:
-        rnbd_opf = RNBD_OP_WRITE_SAME;
-        break;
     case REQ_OP_FLUSH:
         rnbd_opf = RNBD_OP_FLUSH;
         break;
@@ -548,8 +548,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
         cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
     rsp->max_hw_sectors =
         cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
-    rsp->max_write_same_sectors =
-        cpu_to_le32(bdev_write_same(rnbd_dev->bdev));
+    rsp->max_write_same_sectors = 0;
     rsp->max_discard_sectors =
         cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
     rsp->discard_granularity =
@@ -284,7 +284,6 @@
 #include <linux/times.h>
 #include <linux/uaccess.h>
 #include <scsi/scsi_common.h>
-#include <scsi/scsi_request.h>

 /* used to tell the module to turn on full debugging messages */
 static bool debug;
@@ -971,6 +971,7 @@ static struct scsi_host_template iscsi_iser_sht = {
     .proc_name          = "iscsi_iser",
     .this_id            = -1,
     .track_queue_depth  = 1,
+    .cmd_size           = sizeof(struct iscsi_cmd),
 };

 static struct iscsi_transport iscsi_iser_transport = {
@@ -142,7 +142,6 @@ struct mapped_device {
 #define DMF_EMULATE_ZONE_APPEND 9

 void disable_discard(struct mapped_device *md);
-void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);

 static inline sector_t dm_get_size(struct mapped_device *md)
@@ -1997,7 +1997,6 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
      */
     switch (bio_op(ctx->bio_in)) {
     case REQ_OP_WRITE:
-    case REQ_OP_WRITE_SAME:
     case REQ_OP_WRITE_ZEROES:
         return true;
     default:
@@ -335,7 +335,6 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     ti->num_flush_bios = 1;
     ti->num_discard_bios = 1;
     ti->num_secure_erase_bios = 0;
-    ti->num_write_same_bios = 0;
     ti->num_write_zeroes_bios = 0;
     return 0;
 bad:
@@ -304,7 +304,6 @@ static void do_region(int op, int op_flags, unsigned region,
     unsigned num_bvecs;
     sector_t remaining = where->count;
     struct request_queue *q = bdev_get_queue(where->bdev);
-    unsigned short logical_block_size = queue_logical_block_size(q);
     sector_t num_sectors;
     unsigned int special_cmd_max_sectors;

@@ -315,10 +314,8 @@ static void do_region(int op, int op_flags, unsigned region,
         special_cmd_max_sectors = q->limits.max_discard_sectors;
     else if (op == REQ_OP_WRITE_ZEROES)
         special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
-    else if (op == REQ_OP_WRITE_SAME)
-        special_cmd_max_sectors = q->limits.max_write_same_sectors;
-    if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
-         op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+    if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
+        special_cmd_max_sectors == 0) {
         atomic_inc(&io->count);
         dec_count(io, region, BLK_STS_NOTSUPP);
         return;

@@ -337,9 +334,6 @@ static void do_region(int op, int op_flags, unsigned region,
         case REQ_OP_WRITE_ZEROES:
             num_bvecs = 0;
             break;
-        case REQ_OP_WRITE_SAME:
-            num_bvecs = 1;
-            break;
         default:
             num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
                         (PAGE_SIZE >> SECTOR_SHIFT)));

@@ -355,18 +349,6 @@ static void do_region(int op, int op_flags, unsigned region,
             num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
             bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
             remaining -= num_sectors;
-        } else if (op == REQ_OP_WRITE_SAME) {
-            /*
-             * WRITE SAME only uses a single page.
-             */
-            dp->get_page(dp, &page, &len, &offset);
-            bio_add_page(bio, page, logical_block_size, offset);
-            num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
-            bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
-
-            offset = 0;
-            remaining -= num_sectors;
-            dp->next_page(dp);
         } else while (remaining) {
             /*
              * Try and add as many pages as possible.
@@ -60,7 +60,6 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     ti->num_flush_bios = 1;
     ti->num_discard_bios = 1;
     ti->num_secure_erase_bios = 1;
-    ti->num_write_same_bios = 1;
     ti->num_write_zeroes_bios = 1;
     ti->private = lc;
     return 0;
@@ -1249,7 +1249,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)

     ti->num_flush_bios = 1;
     ti->num_discard_bios = 1;
-    ti->num_write_same_bios = 1;
     ti->num_write_zeroes_bios = 1;
     if (m->queue_mode == DM_TYPE_BIO_BASED)
         ti->per_io_data_size = multipath_per_bio_data_size();
@@ -217,9 +217,6 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
         if (req_op(clone) == REQ_OP_DISCARD &&
             !clone->q->limits.max_discard_sectors)
             disable_discard(tio->md);
-        else if (req_op(clone) == REQ_OP_WRITE_SAME &&
-             !clone->q->limits.max_write_same_sectors)
-            disable_write_same(tio->md);
         else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
              !clone->q->limits.max_write_zeroes_sectors)
             disable_write_zeroes(tio->md);
@@ -157,7 +157,6 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     ti->num_flush_bios = stripes;
     ti->num_discard_bios = stripes;
     ti->num_secure_erase_bios = stripes;
-    ti->num_write_same_bios = stripes;
     ti->num_write_zeroes_bios = stripes;

     sc->chunk_size = chunk_size;

@@ -284,8 +283,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
     }
     if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
         unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
-        unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
-        unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
+        unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) {
         target_bio_nr = dm_bio_get_target_bio_nr(bio);
         BUG_ON(target_bio_nr >= sc->stripes);
         return stripe_map_range(sc, bio, target_bio_nr);
@@ -1833,33 +1833,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
     return !blk_queue_add_random(q);
 }

-static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
-                     sector_t start, sector_t len, void *data)
-{
-    struct request_queue *q = bdev_get_queue(dev->bdev);
-
-    return !q->limits.max_write_same_sectors;
-}
-
-static bool dm_table_supports_write_same(struct dm_table *t)
-{
-    struct dm_target *ti;
-    unsigned i;
-
-    for (i = 0; i < dm_table_get_num_targets(t); i++) {
-        ti = dm_table_get_target(t, i);
-
-        if (!ti->num_write_same_bios)
-            return false;
-
-        if (!ti->type->iterate_devices ||
-            ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
-            return false;
-    }
-
-    return true;
-}
-
 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
 {

@@ -2038,8 +2011,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
     else
         blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

-    if (!dm_table_supports_write_same(t))
-        q->limits.max_write_same_sectors = 0;
     if (!dm_table_supports_write_zeroes(t))
         q->limits.max_write_zeroes_sectors = 0;
@@ -130,7 +130,6 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)

     switch (bio_op(bio)) {
     case REQ_OP_WRITE_ZEROES:
-    case REQ_OP_WRITE_SAME:
     case REQ_OP_WRITE:
         return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
     default:

@@ -390,7 +389,6 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
     case REQ_OP_ZONE_FINISH:
         return true;
     case REQ_OP_WRITE_ZEROES:
-    case REQ_OP_WRITE_SAME:
     case REQ_OP_WRITE:
         /* Writes must be aligned to the zone write pointer */
         if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)

@@ -446,7 +444,6 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
                    blk_queue_zone_sectors(md->queue));
         return BLK_STS_OK;
     case REQ_OP_WRITE_ZEROES:
-    case REQ_OP_WRITE_SAME:
     case REQ_OP_WRITE:
         WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
         return BLK_STS_OK;

@@ -503,7 +500,6 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
         return false;
     switch (bio_op(orig_bio)) {
     case REQ_OP_WRITE_ZEROES:
-    case REQ_OP_WRITE_SAME:
     case REQ_OP_WRITE:
     case REQ_OP_ZONE_RESET:
     case REQ_OP_ZONE_FINISH:
@@ -952,14 +952,6 @@ void disable_discard(struct mapped_device *md)
     blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
 }

-void disable_write_same(struct mapped_device *md)
-{
-    struct queue_limits *limits = dm_get_queue_limits(md);
-
-    /* device doesn't really support WRITE SAME, disable it */
-    limits->max_write_same_sectors = 0;
-}
-
 void disable_write_zeroes(struct mapped_device *md)
 {
     struct queue_limits *limits = dm_get_queue_limits(md);

@@ -986,9 +978,6 @@ static void clone_endio(struct bio *bio)
         if (bio_op(bio) == REQ_OP_DISCARD &&
             !q->limits.max_discard_sectors)
             disable_discard(md);
-        else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-             !q->limits.max_write_same_sectors)
-            disable_write_same(md);
         else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
              !q->limits.max_write_zeroes_sectors)
             disable_write_zeroes(md);

@@ -1429,7 +1418,6 @@ static bool is_abnormal_io(struct bio *bio)
     switch (bio_op(bio)) {
     case REQ_OP_DISCARD:
     case REQ_OP_SECURE_ERASE:
-    case REQ_OP_WRITE_SAME:
     case REQ_OP_WRITE_ZEROES:
         r = true;
         break;

@@ -1450,9 +1438,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
     case REQ_OP_SECURE_ERASE:
         num_bios = ti->num_secure_erase_bios;
         break;
-    case REQ_OP_WRITE_SAME:
-        num_bios = ti->num_write_same_bios;
-        break;
     case REQ_OP_WRITE_ZEROES:
         num_bios = ti->num_write_zeroes_bios;
         break;
@@ -259,7 +259,6 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
         if (mddev->gendisk)
             trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
                           bio_sector);
-        mddev_check_writesame(mddev, bio);
         mddev_check_write_zeroes(mddev, bio);
         submit_bio_noacct(bio);
     }
@@ -127,7 +127,6 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
     mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
     mp_bh->bio.bi_end_io = multipath_end_request;
     mp_bh->bio.bi_private = mp_bh;
-    mddev_check_writesame(mddev, &mp_bh->bio);
     mddev_check_write_zeroes(mddev, &mp_bh->bio);
     submit_bio_noacct(&mp_bh->bio);
     return true;
@@ -797,13 +797,6 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
     mddev->flags &= ~unsupported_flags;
 }

-static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
-{
-    if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-        !bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors)
-        mddev->queue->limits.max_write_same_sectors = 0;
-}
-
 static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 {
     if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
@@ -402,7 +402,6 @@ static int raid0_run(struct mddev *mddev)
         bool discard_supported = false;

         blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-        blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
         blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
         blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

@@ -594,7 +593,6 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
     if (mddev->gendisk)
         trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
                       bio_sector);
-    mddev_check_writesame(mddev, bio);
     mddev_check_write_zeroes(mddev, bio);
     submit_bio_noacct(bio);
     return true;
@@ -3135,10 +3135,8 @@ static int raid1_run(struct mddev *mddev)
     if (IS_ERR(conf))
         return PTR_ERR(conf);

-    if (mddev->queue) {
-        blk_queue_max_write_same_sectors(mddev->queue, 0);
+    if (mddev->queue)
         blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-    }

     rdev_for_each(rdev, mddev) {
         if (!mddev->gendisk)
@@ -4102,7 +4102,6 @@ static int raid10_run(struct mddev *mddev)
     if (mddev->queue) {
         blk_queue_max_discard_sectors(mddev->queue,
                           UINT_MAX);
-        blk_queue_max_write_same_sectors(mddev->queue, 0);
         blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
         blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
         raid10_set_io_opt(conf);
@@ -7758,7 +7758,6 @@ static int raid5_run(struct mddev *mddev)
 		mddev->queue->limits.discard_alignment = stripe;
 		mddev->queue->limits.discard_granularity = stripe;
 
-		blk_queue_max_write_same_sectors(mddev->queue, 0);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
 
 		rdev_for_each(rdev, mddev) {

@@ -3680,7 +3680,7 @@ mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
 	    MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
 		return NULL;
 
-	port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
+	port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
 	if (!port_info) {
 		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 		    "%s: exit at line=%d\n", ioc->name,

@@ -1493,7 +1493,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* SCSI needs scsi_cmnd lookup table!
 	 * (with size equal to req_depth*PtrSz!)
 	 */
-	ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+	ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
 	if (!ioc->ScsiLookup) {
 		error = -ENOMEM;
 		goto out_mptspi_probe;

@@ -1791,8 +1791,6 @@ static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
 	slot->cmnd = SCp;
 
 	SCp->host_scribble = (unsigned char *)slot;
-	SCp->SCp.ptr = NULL;
-	SCp->SCp.buffer = NULL;
 
 #ifdef NCR_700_DEBUG
 	printk("53c700: scsi%d, command ", SCp->device->host->host_no);

@@ -84,8 +84,7 @@
  * On command termination, the done function will be called as
  * appropriate.
  *
- * SCSI pointers are maintained in the SCp field of SCSI command
- * structures, being initialized after the command is connected
+ * The command data pointer is initialized after the command is connected
  * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
  * Note that in violation of the standard, an implicit SAVE POINTERS operation
  * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
@@ -145,40 +144,38 @@ static void bus_reset_cleanup(struct Scsi_Host *);
 
 static inline void initialize_SCp(struct scsi_cmnd *cmd)
 {
-    /*
-     * Initialize the Scsi Pointer field so that all of the commands in the
-     * various queues are valid.
-     */
+    struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
+
     if (scsi_bufflen(cmd)) {
-        cmd->SCp.buffer = scsi_sglist(cmd);
-        cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-        cmd->SCp.this_residual = cmd->SCp.buffer->length;
+        ncmd->buffer = scsi_sglist(cmd);
+        ncmd->ptr = sg_virt(ncmd->buffer);
+        ncmd->this_residual = ncmd->buffer->length;
     } else {
-        cmd->SCp.buffer = NULL;
-        cmd->SCp.ptr = NULL;
-        cmd->SCp.this_residual = 0;
+        ncmd->buffer = NULL;
+        ncmd->ptr = NULL;
+        ncmd->this_residual = 0;
     }
 
-    cmd->SCp.Status = 0;
-    cmd->SCp.Message = 0;
+    ncmd->status = 0;
+    ncmd->message = 0;
 }
 
-static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd)
 {
-    struct scatterlist *s = cmd->SCp.buffer;
+    struct scatterlist *s = ncmd->buffer;
 
-    if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
-        cmd->SCp.buffer = sg_next(s);
-        cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-        cmd->SCp.this_residual = cmd->SCp.buffer->length;
+    if (!ncmd->this_residual && s && !sg_is_last(s)) {
+        ncmd->buffer = sg_next(s);
+        ncmd->ptr = sg_virt(ncmd->buffer);
+        ncmd->this_residual = ncmd->buffer->length;
     }
 }
 
 static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
 {
-    int resid = cmd->SCp.this_residual;
-    struct scatterlist *s = cmd->SCp.buffer;
+    struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
+    int resid = ncmd->this_residual;
+    struct scatterlist *s = ncmd->buffer;
 
     if (s)
         while (!sg_is_last(s)) {

@@ -564,7 +561,7 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
                                  struct scsi_cmnd *cmd)
 {
     struct NCR5380_hostdata *hostdata = shost_priv(instance);
-    struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+    struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
     unsigned long flags;
 
 #if (NDEBUG & NDEBUG_NO_WRITE)

@@ -672,7 +669,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
 static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 {
     struct NCR5380_hostdata *hostdata = shost_priv(instance);
-    struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+    struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
 
     if (hostdata->sensing == cmd) {
         scsi_eh_restore_cmnd(cmd, &hostdata->ses);

@@ -757,6 +754,7 @@ static void NCR5380_main(struct work_struct *work)
 static void NCR5380_dma_complete(struct Scsi_Host *instance)
 {
     struct NCR5380_hostdata *hostdata = shost_priv(instance);
+    struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
     int transferred;
     unsigned char **data;
     int *count;

@@ -764,7 +762,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
     unsigned char p;
 
     if (hostdata->read_overruns) {
-        p = hostdata->connected->SCp.phase;
+        p = ncmd->phase;
         if (p & SR_IO) {
             udelay(10);
             if ((NCR5380_read(BUS_AND_STATUS_REG) &

@@ -801,8 +799,8 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
     transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
     hostdata->dma_len = 0;
 
-    data = (unsigned char **)&hostdata->connected->SCp.ptr;
-    count = &hostdata->connected->SCp.this_residual;
+    data = (unsigned char **)&ncmd->ptr;
+    count = &ncmd->this_residual;
     *data += transferred;
     *count -= transferred;
 
@@ -1498,7 +1496,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
         return -1;
     }
 
-    hostdata->connected->SCp.phase = p;
+    NCR5380_to_ncmd(hostdata->connected)->phase = p;
 
     if (p & SR_IO) {
         if (hostdata->read_overruns)

@@ -1690,7 +1688,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 #endif
 
     while ((cmd = hostdata->connected)) {
-        struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+        struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
 
         tmp = NCR5380_read(STATUS_REG);
         /* We only have a valid SCSI phase when REQ is asserted */

@@ -1705,17 +1703,17 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
             sun3_dma_setup_done != cmd) {
             int count;
 
-            advance_sg_buffer(cmd);
+            advance_sg_buffer(ncmd);
 
             count = sun3scsi_dma_xfer_len(hostdata, cmd);
 
             if (count > 0) {
                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
                     sun3scsi_dma_send_setup(hostdata,
-                                            cmd->SCp.ptr, count);
+                                            ncmd->ptr, count);
                 else
                     sun3scsi_dma_recv_setup(hostdata,
-                                            cmd->SCp.ptr, count);
+                                            ncmd->ptr, count);
                 sun3_dma_setup_done = cmd;
             }
 #ifdef SUN3_SCSI_VME

@@ -1755,11 +1753,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
              * scatter-gather list, move onto the next one.
              */
 
-            advance_sg_buffer(cmd);
+            advance_sg_buffer(ncmd);
             dsprintk(NDEBUG_INFORMATION, instance,
                      "this residual %d, sg ents %d\n",
-                     cmd->SCp.this_residual,
-                     sg_nents(cmd->SCp.buffer));
+                     ncmd->this_residual,
+                     sg_nents(ncmd->buffer));
 
             /*
              * The preferred transfer method is going to be

@@ -1778,7 +1776,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
             if (transfersize > 0) {
                 len = transfersize;
                 if (NCR5380_transfer_dma(instance, &phase,
-                    &len, (unsigned char **)&cmd->SCp.ptr)) {
+                    &len, (unsigned char **)&ncmd->ptr)) {
                     /*
                      * If the watchdog timer fires, all future
                      * accesses to this device will use the

@@ -1794,13 +1792,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                 /* Transfer a small chunk so that the
                  * irq mode lock is not held too long.
                  */
-                transfersize = min(cmd->SCp.this_residual,
+                transfersize = min(ncmd->this_residual,
                                    NCR5380_PIO_CHUNK_SIZE);
                 len = transfersize;
                 NCR5380_transfer_pio(instance, &phase, &len,
-                                     (unsigned char **)&cmd->SCp.ptr,
+                                     (unsigned char **)&ncmd->ptr,
                                      0);
-                cmd->SCp.this_residual -= transfersize - len;
+                ncmd->this_residual -= transfersize - len;
             }
 #ifdef CONFIG_SUN3
             if (sun3_dma_setup_done == cmd)

@@ -1811,7 +1809,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
             len = 1;
             data = &tmp;
             NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
-            cmd->SCp.Message = tmp;
+            ncmd->message = tmp;
 
             switch (tmp) {
             case ABORT:

@@ -1828,15 +1826,15 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                 hostdata->connected = NULL;
                 hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 
-                set_status_byte(cmd, cmd->SCp.Status);
+                set_status_byte(cmd, ncmd->status);
 
                 set_resid_from_SCp(cmd);
 
                 if (cmd->cmnd[0] == REQUEST_SENSE)
                     complete_cmd(instance, cmd);
                 else {
-                    if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION ||
-                        cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) {
+                    if (ncmd->status == SAM_STAT_CHECK_CONDITION ||
+                        ncmd->status == SAM_STAT_COMMAND_TERMINATED) {
                         dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n",
                                  cmd);
                         list_add_tail(&ncmd->list,

@@ -2000,7 +1998,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
             len = 1;
             data = &tmp;
             NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
-            cmd->SCp.Status = tmp;
+            ncmd->status = tmp;
             break;
         default:
             shost_printk(KERN_ERR, instance, "unknown phase\n");

@@ -2153,17 +2151,17 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
     if (sun3_dma_setup_done != tmp) {
         int count;
 
-        advance_sg_buffer(tmp);
+        advance_sg_buffer(ncmd);
 
         count = sun3scsi_dma_xfer_len(hostdata, tmp);
 
         if (count > 0) {
             if (tmp->sc_data_direction == DMA_TO_DEVICE)
                 sun3scsi_dma_send_setup(hostdata,
-                                        tmp->SCp.ptr, count);
+                                        ncmd->ptr, count);
             else
                 sun3scsi_dma_recv_setup(hostdata,
-                                        tmp->SCp.ptr, count);
+                                        ncmd->ptr, count);
             sun3_dma_setup_done = tmp;
         }
     }

@@ -2206,7 +2204,7 @@ static bool list_del_cmd(struct list_head *haystack,
                          struct scsi_cmnd *needle)
 {
     if (list_find_cmd(haystack, needle)) {
-        struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle);
+        struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(needle);
 
         list_del(&ncmd->list);
         return true;

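The NCR5380 hunks above move the data-phase cursor (buffer, ptr, this_residual) out of cmd->SCp and into struct NCR5380_cmd, but the advance logic itself is untouched: once the current scatterlist segment is drained, step to the next one and reset the window. A minimal standalone model of that cursor (invented types, not the kernel scatterlist API), for example::

    #include <stdio.h>

    /* Stand-in for one scatterlist segment. */
    struct seg { char *ptr; int len; };

    /* Mirrors struct NCR5380_cmd's cursor fields. */
    struct cursor {
        struct seg *buffer;     /* current segment */
        char *ptr;              /* next byte to transfer */
        int this_residual;      /* bytes left in this segment */
        struct seg *end;        /* one past the last segment */
    };

    /* Same shape as advance_sg_buffer(): only move on when the
     * current segment is fully consumed and another one exists. */
    static void advance(struct cursor *c)
    {
        if (!c->this_residual && c->buffer && c->buffer + 1 != c->end) {
            c->buffer++;
            c->ptr = c->buffer->ptr;
            c->this_residual = c->buffer->len;
        }
    }

    int main(void)
    {
        char a[4] = "abcd", b[2] = "ef";
        struct seg segs[] = { { a, 4 }, { b, 2 } };
        struct cursor c = { segs, a, 4, segs + 2 };

        while (c.this_residual) {
            putchar(*c.ptr++);  /* "transfer" one byte */
            c.this_residual--;
            advance(&c);
        }
        putchar('\n');          /* prints: abcdef */
        return 0;
    }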
@@ -227,11 +227,15 @@ struct NCR5380_hostdata {
 };
 
 struct NCR5380_cmd {
+    char *ptr;
+    int this_residual;
+    struct scatterlist *buffer;
+    int status;
+    int message;
+    int phase;
     struct list_head list;
 };
 
 #define NCR5380_CMD_SIZE        (sizeof(struct NCR5380_cmd))
 
 #define NCR5380_PIO_CHUNK_SIZE  256
 
 /* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */

@@ -242,6 +246,11 @@ static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
     return ((struct scsi_cmnd *)ncmd_ptr) - 1;
 }
 
+static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd)
+{
+    return scsi_cmd_priv(cmd);
+}
+
 #ifndef NDEBUG
 #define NDEBUG (0)
 #endif

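NCR5380_to_scmd() and the new NCR5380_to_ncmd() are two sides of one layout rule: the SCSI mid-layer allocates cmd_size bytes of driver-private data directly behind each struct scsi_cmnd, so each structure is reachable from the other by pointer arithmetic. A self-contained sketch of that layout (hypothetical cmd/priv types, not the mid-layer code)::

    #include <stdio.h>
    #include <stdlib.h>

    struct cmd { int tag; };            /* stand-in for struct scsi_cmnd */
    struct priv { int phase; };         /* stand-in for struct NCR5380_cmd */

    /* Mid-layer style: one allocation, private data right after the command. */
    static struct cmd *cmd_alloc(void)
    {
        return calloc(1, sizeof(struct cmd) + sizeof(struct priv));
    }

    /* Like scsi_cmd_priv(): the private area starts just past the command. */
    static struct priv *cmd_priv(struct cmd *c)
    {
        return (struct priv *)(c + 1);
    }

    /* Like NCR5380_to_scmd(): step back over the command from its priv data. */
    static struct cmd *priv_to_cmd(struct priv *p)
    {
        return (struct cmd *)p - 1;
    }

    int main(void)
    {
        struct cmd *c = cmd_alloc();

        cmd_priv(c)->phase = 42;
        printf("%d %d\n", priv_to_cmd(cmd_priv(c)) == c, cmd_priv(c)->phase);
        free(c);
        return 0;           /* prints: 1 42 */
    }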
@@ -12,7 +12,11 @@
 #include <asm/amigaints.h>
 #include <asm/amigahw.h>
 
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
 #include "wd33c93.h"
 #include "a2091.h"
 

@@ -40,16 +44,17 @@ static irqreturn_t a2091_intr(int irq, void *data)
 
 static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
+    struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
     struct Scsi_Host *instance = cmd->device->host;
     struct a2091_hostdata *hdata = shost_priv(instance);
     struct WD33C93_hostdata *wh = &hdata->wh;
     struct a2091_scsiregs *regs = hdata->regs;
     unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
-    unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+    unsigned long addr = virt_to_bus(scsi_pointer->ptr);
 
     /* don't allow DMA if the physical address is bad */
     if (addr & A2091_XFER_MASK) {
-        wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+        wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
         wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
                                         GFP_KERNEL);
 
@@ -73,8 +78,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 
         if (!dir_in) {
             /* copy to bounce buffer for a write */
-            memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
-                   cmd->SCp.this_residual);
+            memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+                   scsi_pointer->this_residual);
         }
     }
 
@@ -92,10 +97,10 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 
     if (dir_in) {
         /* invalidate any cache */
-        cache_clear(addr, cmd->SCp.this_residual);
+        cache_clear(addr, scsi_pointer->this_residual);
     } else {
         /* push any dirty cache */
-        cache_push(addr, cmd->SCp.this_residual);
+        cache_push(addr, scsi_pointer->this_residual);
     }
     /* start DMA */
     regs->ST_DMA = 1;

@@ -107,6 +112,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
                      int status)
 {
+    struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
     struct a2091_hostdata *hdata = shost_priv(instance);
     struct WD33C93_hostdata *wh = &hdata->wh;
     struct a2091_scsiregs *regs = hdata->regs;

@@ -139,8 +145,8 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
     /* copy from a bounce buffer, if necessary */
     if (status && wh->dma_bounce_buffer) {
         if (wh->dma_dir)
-            memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
-                   SCpnt->SCp.this_residual);
+            memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+                   scsi_pointer->this_residual);
         kfree(wh->dma_bounce_buffer);
         wh->dma_bounce_buffer = NULL;
         wh->dma_bounce_len = 0;

@@ -161,6 +167,7 @@ static struct scsi_host_template a2091_scsi_template = {
     .sg_tablesize       = SG_ALL,
     .cmd_per_lun        = CMD_PER_LUN,
     .dma_boundary       = PAGE_SIZE - 1,
+    .cmd_size           = sizeof(struct scsi_pointer),
 };
 
 static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)

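dma_setup()/dma_stop() in a2091 (and a3000 below) implement a textbook bounce buffer: when the buffer's bus address trips the controller's address mask, DMA runs against a freshly allocated aligned buffer instead, copied in before a write and back out after a successful read. A compact userspace model of the control flow (all names invented for illustration)::

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define XFER_MASK 0x3   /* pretend the controller needs 4-byte alignment */

    struct xfer {
        void *data;         /* caller's buffer */
        size_t len;
        void *bounce;       /* NULL unless we had to substitute one */
    };

    /* Decide what the DMA engine will actually see, as in dma_setup(). */
    static void *dma_prepare(struct xfer *x, int dir_in)
    {
        if (((uintptr_t)x->data & XFER_MASK) == 0)
            return x->data;                 /* usable directly */

        /* round the length up to 512 bytes, as the driver does */
        x->bounce = aligned_alloc(XFER_MASK + 1,
                                  (x->len + 511) & ~(size_t)0x1ff);
        if (!x->bounce)
            return NULL;
        if (!dir_in)                        /* write: fill bounce first */
            memcpy(x->bounce, x->data, x->len);
        return x->bounce;
    }

    /* Tear-down, as in dma_stop(): copy back after a successful read. */
    static void dma_finish(struct xfer *x, int dir_in, int ok)
    {
        if (x->bounce) {
            if (dir_in && ok)
                memcpy(x->data, x->bounce, x->len);
            free(x->bounce);
            x->bounce = NULL;
        }
    }

    int main(void)
    {
        char buf[10] = "payloads!";
        struct xfer x = { buf + 1, 8, NULL };   /* deliberately misaligned */
        void *dma = dma_prepare(&x, 0);

        dma_finish(&x, 0, dma != NULL);
        return 0;
    }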
@@ -13,7 +13,11 @@
 #include <asm/amigaints.h>
 #include <asm/amigahw.h>
 
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
 #include "wd33c93.h"
 #include "a3000.h"
 

@@ -44,12 +48,13 @@ static irqreturn_t a3000_intr(int irq, void *data)
 
 static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
+    struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
     struct Scsi_Host *instance = cmd->device->host;
     struct a3000_hostdata *hdata = shost_priv(instance);
     struct WD33C93_hostdata *wh = &hdata->wh;
     struct a3000_scsiregs *regs = hdata->regs;
     unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
-    unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+    unsigned long addr = virt_to_bus(scsi_pointer->ptr);
 
     /*
      * if the physical address has the wrong alignment, or if

@@ -58,7 +63,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
      * buffer
      */
     if (addr & A3000_XFER_MASK) {
-        wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+        wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
         wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
                                         GFP_KERNEL);
 
@@ -70,8 +75,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 
         if (!dir_in) {
             /* copy to bounce buffer for a write */
-            memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
-                   cmd->SCp.this_residual);
+            memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+                   scsi_pointer->this_residual);
         }
 
         addr = virt_to_bus(wh->dma_bounce_buffer);

@@ -91,10 +96,10 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 
     if (dir_in) {
         /* invalidate any cache */
-        cache_clear(addr, cmd->SCp.this_residual);
+        cache_clear(addr, scsi_pointer->this_residual);
     } else {
         /* push any dirty cache */
-        cache_push(addr, cmd->SCp.this_residual);
+        cache_push(addr, scsi_pointer->this_residual);
     }
 
     /* start DMA */

@@ -109,6 +114,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
                      int status)
 {
+    struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
     struct a3000_hostdata *hdata = shost_priv(instance);
     struct WD33C93_hostdata *wh = &hdata->wh;
     struct a3000_scsiregs *regs = hdata->regs;

@@ -149,8 +155,8 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
     if (status && wh->dma_bounce_buffer) {
         if (SCpnt) {
             if (wh->dma_dir && SCpnt)
-                memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
-                       SCpnt->SCp.this_residual);
+                memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+                       scsi_pointer->this_residual);
             kfree(wh->dma_bounce_buffer);
             wh->dma_bounce_buffer = NULL;
             wh->dma_bounce_len = 0;

@@ -175,6 +181,7 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
     .this_id            = 7,
    .sg_tablesize       = SG_ALL,
     .cmd_per_lun        = CMD_PER_LUN,
+    .cmd_size           = sizeof(struct scsi_pointer),
 };
 
 static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)

@@ -338,7 +338,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
         aac_fib_complete(fibptr);
         return 0;
     }
-    scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+    aac_priv(scsicmd)->owner = AAC_OWNER_MIDLEVEL;
     device = scsicmd->device;
     if (unlikely(!device)) {
         dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));

@@ -592,7 +592,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 
     aac_fib_init(cmd_fibcontext);
     dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
 
     dinfo->command = cpu_to_le32(VM_ContainerConfig);
     dinfo->type = cpu_to_le32(CT_READ_NAME);

@@ -634,14 +634,15 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
 {
     struct fsa_dev_info *fsa_dev_ptr;
     int (*callback)(struct scsi_cmnd *);
-    struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+    struct scsi_cmnd *scsicmd = context;
+    struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
     int i;
 
 
     if (!aac_valid_context(scsicmd, fibptr))
         return;
 
-    scsicmd->SCp.Status = 0;
+    cmd_priv->status = 0;
     fsa_dev_ptr = fibptr->dev->fsa_dev;
     if (fsa_dev_ptr) {
         struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);

@@ -679,12 +680,12 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
         }
         if ((fsa_dev_ptr->valid & 1) == 0)
             fsa_dev_ptr->valid = 0;
-        scsicmd->SCp.Status = le32_to_cpu(dresp->count);
+        cmd_priv->status = le32_to_cpu(dresp->count);
     }
     aac_fib_complete(fibptr);
     aac_fib_free(fibptr);
-    callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
-    scsicmd->SCp.ptr = NULL;
+    callback = cmd_priv->callback;
+    cmd_priv->callback = NULL;
     (*callback)(scsicmd);
     return;
 }

@@ -722,7 +723,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
 
     dinfo->count = cpu_to_le32(scmd_id(scsicmd));
     dinfo->type = cpu_to_le32(FT_FILESYS);
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
 
     status = aac_fib_send(ContainerCommand,
                           fibptr,

@@ -743,6 +744,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
 
 static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
 {
+    struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
     struct fib * fibptr;
     int status = -ENOMEM;
 

@@ -761,8 +763,8 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
 
         dinfo->count = cpu_to_le32(scmd_id(scsicmd));
         dinfo->type = cpu_to_le32(FT_FILESYS);
-        scsicmd->SCp.ptr = (char *)callback;
-        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+        cmd_priv->callback = callback;
+        cmd_priv->owner = AAC_OWNER_FIRMWARE;
 
         status = aac_fib_send(ContainerCommand,
                               fibptr,

@@ -778,7 +780,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
             return 0;
 
         if (status < 0) {
-            scsicmd->SCp.ptr = NULL;
+            cmd_priv->callback = NULL;
             aac_fib_complete(fibptr);
             aac_fib_free(fibptr);
         }

@@ -817,6 +819,7 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
 int aac_probe_container(struct aac_dev *dev, int cid)
 {
     struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
+    struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
     struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
     int status;
 

@@ -835,7 +838,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
     while (scsicmd->device == scsidev)
         schedule();
     kfree(scsidev);
-    status = scsicmd->SCp.Status;
+    status = cmd_priv->status;
     kfree(scsicmd);
     return status;
 }

@@ -1128,7 +1131,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
     dinfo->command = cpu_to_le32(VM_ContainerConfig);
     dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
     dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
 
     status = aac_fib_send(ContainerCommand,
                           cmd_fibcontext,

@@ -2486,7 +2489,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
      *  Alocate and initialize a Fib
      */
     cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
     status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
 
     /*

@@ -2577,7 +2580,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
      *  Allocate and initialize a Fib then setup a BlockWrite command
      */
     cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
     status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
 
     /*

@@ -2660,7 +2663,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
     synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
     synchronizecmd->count =
          cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
 
     /*
      *  Now send the Fib to the adapter

@@ -2736,7 +2739,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
     pmcmd->cid = cpu_to_le32(sdev_id(sdev));
     pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
         cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
 
     /*
      *  Now send the Fib to the adapter

@@ -3695,7 +3698,7 @@ out:
     aac_fib_complete(fibptr);
 
     if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
-        scsicmd->SCp.sent_command = 1;
+        aac_priv(scsicmd)->sent_command = 1;
     else
         aac_scsi_done(scsicmd);
 }

@@ -3725,7 +3728,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
      *  Allocate and initialize a Fib then setup a BlockWrite command
      */
     cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
     status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
 
     /*

@@ -3769,7 +3772,7 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
     if (!cmd_fibcontext)
         return -1;
 
-    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+    aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
     status = aac_adapter_hba(cmd_fibcontext, scsicmd);
 
     /*

@@ -29,6 +29,7 @@
 #include <linux/completion.h>
 #include <linux/pci.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
 
 /*------------------------------------------------------------------------------
  *              D E F I N E S

@@ -2673,11 +2674,24 @@ static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
     cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
 }
 
-/* SCp.phase values */
-#define AAC_OWNER_MIDLEVEL      0x101
-#define AAC_OWNER_LOWLEVEL      0x102
-#define AAC_OWNER_ERROR_HANDLER 0x103
-#define AAC_OWNER_FIRMWARE      0x106
+enum aac_cmd_owner {
+    AAC_OWNER_MIDLEVEL      = 0x101,
+    AAC_OWNER_LOWLEVEL      = 0x102,
+    AAC_OWNER_ERROR_HANDLER = 0x103,
+    AAC_OWNER_FIRMWARE      = 0x106,
+};
+
+struct aac_cmd_priv {
+    int (*callback)(struct scsi_cmnd *);
+    int status;
+    enum aac_cmd_owner owner;
+    bool sent_command;
+};
+
+static inline struct aac_cmd_priv *aac_priv(struct scsi_cmnd *cmd)
+{
+    return scsi_cmd_priv(cmd);
+}
 
 void aac_safw_rescan_worker(struct work_struct *work);
 void aac_src_reinit_aif_worker(struct work_struct *work);

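Before this conversion aacraid smuggled its completion callback through the untyped char *SCp.ptr, casting on store and on load; struct aac_cmd_priv keeps it as a properly typed function pointer, so mismatched signatures now fail at compile time. A standalone sketch of the difference (invented cmd/cmd_priv types)::

    #include <stdio.h>

    struct cmd { char *scratch; };      /* old style: untyped scratch slot */
    struct cmd_priv {                   /* new style: typed member */
        int (*callback)(struct cmd *);
    };

    static int done(struct cmd *c) { (void)c; return 0; }

    int main(void)
    {
        struct cmd c;
        struct cmd_priv priv;

        /* Old: the casts compile, but nothing checks that scratch
         * really holds a function of the right signature. */
        c.scratch = (char *)done;
        ((int (*)(struct cmd *))c.scratch)(&c);

        /* New: assigning a function with the wrong signature is a
         * compile-time error instead of a runtime surprise. */
        priv.callback = done;
        priv.callback(&c);
        return 0;
    }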
@@ -276,7 +276,7 @@ static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd)
 {
     int *active = data;
 
-    if (cmd->SCp.phase == AAC_OWNER_FIRMWARE)
+    if (aac_priv(cmd)->owner == AAC_OWNER_FIRMWARE)
         *active = *active + 1;
     return true;
 }

@@ -241,10 +241,9 @@ static struct aac_driver_ident aac_drivers[] = {
 static int aac_queuecommand(struct Scsi_Host *shost,
                             struct scsi_cmnd *cmd)
 {
-    int r = 0;
-    cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
-    r = (aac_scsi_cmd(cmd) ? FAILED : 0);
-    return r;
+    aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
+
+    return aac_scsi_cmd(cmd) ? FAILED : 0;
 }
 
 /**

@@ -638,7 +637,7 @@ static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
 {
     struct fib_count_data *fib_count = data;
 
-    switch (scmnd->SCp.phase) {
+    switch (aac_priv(scmnd)->owner) {
     case AAC_OWNER_FIRMWARE:
         fib_count->fwcnt++;
         break;

@@ -680,6 +679,7 @@ static int get_num_of_incomplete_fibs(struct aac_dev *aac)
 
 static int aac_eh_abort(struct scsi_cmnd* cmd)
 {
+    struct aac_cmd_priv *cmd_priv = aac_priv(cmd);
     struct scsi_device * dev = cmd->device;
     struct Scsi_Host * host = dev->host;
     struct aac_dev * aac = (struct aac_dev *)host->hostdata;

@@ -732,7 +732,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
             tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
 
             fib->hbacmd_size = sizeof(*tmf);
-            cmd->SCp.sent_command = 0;
+            cmd_priv->sent_command = 0;
 
             status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
                       (fib_callback) aac_hba_callback,

@@ -744,7 +744,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
             }
             /* Wait up to 15 secs for completion */
             for (count = 0; count < 15; ++count) {
-                if (cmd->SCp.sent_command) {
+                if (cmd_priv->sent_command) {
                     ret = SUCCESS;
                     break;
                 }

@@ -784,7 +784,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
                     (fib->callback_data == cmd)) {
                     fib->flags |=
                         FIB_CONTEXT_FLAG_TIMED_OUT;
-                    cmd->SCp.phase =
+                    cmd_priv->owner =
                         AAC_OWNER_ERROR_HANDLER;
                     ret = SUCCESS;
                 }

@@ -811,7 +811,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
                     (command->device == cmd->device)) {
                     fib->flags |=
                         FIB_CONTEXT_FLAG_TIMED_OUT;
-                    command->SCp.phase =
+                    aac_priv(command)->owner =
                         AAC_OWNER_ERROR_HANDLER;
                     if (command == cmd)
                         ret = SUCCESS;

@@ -864,7 +864,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
     rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
     fib->hbacmd_size = sizeof(*rst);
 
-    return HBA_IU_TYPE_SATA_REQ;
+    return HBA_IU_TYPE_SATA_REQ;
 }
 
 static void aac_tmf_callback(void *context, struct fib *fibptr)

@@ -1058,7 +1058,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
             if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
                 info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
                 fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
-                cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+                aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER;
             }
         }
     }

@@ -1507,6 +1507,7 @@ static struct scsi_host_template aac_driver_template = {
 #endif
     .emulated                   = 1,
     .no_write_same              = 1,
+    .cmd_size                   = sizeof(struct aac_cmd_priv),
 };
 
 static void __aac_shutdown(struct aac_dev * aac)

@@ -2277,6 +2277,15 @@ struct asc_board {
             dvc_var.adv_dvc_var)
 #define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev)
 
+struct advansys_cmd {
+    dma_addr_t dma_handle;
+};
+
+static struct advansys_cmd *advansys_cmd(struct scsi_cmnd *cmd)
+{
+    return scsi_cmd_priv(cmd);
+}
+
 #ifdef ADVANSYS_DEBUG
 static int asc_dbglvl = 3;
 

@@ -6681,7 +6690,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
 
     ASC_STATS(boardp->shost, callback);
 
-    dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
+    dma_unmap_single(boardp->dev, advansys_cmd(scp)->dma_handle,
                      SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
     /*
      * 'qdonep' contains the command's ending status.

@@ -7399,15 +7408,15 @@ static int advansys_slave_configure(struct scsi_device *sdev)
 static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp)
 {
     struct asc_board *board = shost_priv(scp->device->host);
+    struct advansys_cmd *acmd = advansys_cmd(scp);
 
-    scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
-                                         SCSI_SENSE_BUFFERSIZE,
-                                         DMA_FROM_DEVICE);
-    if (dma_mapping_error(board->dev, scp->SCp.dma_handle)) {
+    acmd->dma_handle = dma_map_single(board->dev, scp->sense_buffer,
+                                      SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+    if (dma_mapping_error(board->dev, acmd->dma_handle)) {
         ASC_DBG(1, "failed to map sense buffer\n");
         return 0;
     }
-    return cpu_to_le32(scp->SCp.dma_handle);
+    return cpu_to_le32(acmd->dma_handle);
 }
 
 static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,

@@ -10604,6 +10613,7 @@ static struct scsi_host_template advansys_template = {
     .eh_host_reset_handler = advansys_reset,
     .bios_param = advansys_biosparam,
     .slave_configure = advansys_slave_configure,
+    .cmd_size = sizeof(struct advansys_cmd),
 };
 
 static int advansys_wide_init_chip(struct Scsi_Host *shost)

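advansys now parks the sense-buffer DMA handle in per-command data instead of SCp.dma_handle; the shape of the pattern is map at submission, carry the handle with the command, unmap at completion. A standalone model with stub map/unmap helpers (the dma_addr_t sentinel and all names here are invented, not the kernel DMA API)::

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;
    #define DMA_ERROR ((dma_addr_t)0)   /* sentinel, like dma_mapping_error() */

    struct cmd_priv { dma_addr_t dma_handle; };  /* as in struct advansys_cmd */

    /* Stubs standing in for dma_map_single()/dma_unmap_single(). */
    static dma_addr_t fake_map(void *cpu_addr, size_t len)
    {
        (void)len;
        return (dma_addr_t)(uintptr_t)cpu_addr;  /* identity "mapping" */
    }

    static void fake_unmap(dma_addr_t h, size_t len)
    {
        (void)h; (void)len;
    }

    /* Submission path: map and stash the handle with the command. */
    static int submit(struct cmd_priv *acmd, void *sense, size_t len)
    {
        acmd->dma_handle = fake_map(sense, len);
        if (acmd->dma_handle == DMA_ERROR)
            return -1;
        return 0;
    }

    /* Completion path: the handle travels with the command, not globals. */
    static void finish(struct cmd_priv *acmd, size_t len)
    {
        fake_unmap(acmd->dma_handle, len);
    }

    int main(void)
    {
        char sense[96];
        struct cmd_priv acmd;

        if (submit(&acmd, sense, sizeof(sense)) == 0)
            finish(&acmd, sizeof(sense));
        return 0;
    }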
@ -243,13 +243,16 @@
|
|||
#include <linux/workqueue.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <scsi/scsicam.h>
|
||||
|
||||
#include "scsi.h"
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_dbg.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_transport_spi.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_eh.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_tcq.h>
|
||||
#include <scsi/scsi_transport_spi.h>
|
||||
#include <scsi/scsicam.h>
|
||||
#include "aha152x.h"
|
||||
|
||||
static LIST_HEAD(aha152x_host_list);
|
||||
|
@ -313,6 +316,17 @@ enum {
|
|||
check_condition = 0x0800, /* requesting sense after CHECK CONDITION */
|
||||
};
|
||||
|
||||
struct aha152x_cmd_priv {
|
||||
struct scsi_pointer scsi_pointer;
|
||||
};
|
||||
|
||||
static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd);
|
||||
|
||||
return &acmd->scsi_pointer;
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("Jürgen Fischer");
|
||||
MODULE_DESCRIPTION(AHA152X_REVID);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -876,14 +890,17 @@ void aha152x_release(struct Scsi_Host *shpnt)
|
|||
static int setup_expected_interrupts(struct Scsi_Host *shpnt)
|
||||
{
|
||||
if(CURRENT_SC) {
|
||||
CURRENT_SC->SCp.phase |= 1 << 16;
|
||||
struct scsi_pointer *scsi_pointer =
|
||||
aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
if(CURRENT_SC->SCp.phase & selecting) {
|
||||
scsi_pointer->phase |= 1 << 16;
|
||||
|
||||
if (scsi_pointer->phase & selecting) {
|
||||
SETPORT(SSTAT1, SELTO);
|
||||
SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
|
||||
SETPORT(SIMODE1, ENSELTIMO);
|
||||
} else {
|
||||
SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0);
|
||||
SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0);
|
||||
SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
|
||||
}
|
||||
} else if(STATE==seldi) {
|
||||
|
@ -907,16 +924,17 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
|
|||
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
|
||||
struct completion *complete, int phase)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt);
|
||||
struct Scsi_Host *shpnt = SCpnt->device->host;
|
||||
unsigned long flags;
|
||||
|
||||
SCpnt->SCp.phase = not_issued | phase;
|
||||
SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
|
||||
SCpnt->SCp.Message = 0;
|
||||
SCpnt->SCp.have_data_in = 0;
|
||||
SCpnt->SCp.sent_command = 0;
|
||||
scsi_pointer->phase = not_issued | phase;
|
||||
scsi_pointer->Status = 0x1; /* Ilegal status by SCSI standard */
|
||||
scsi_pointer->Message = 0;
|
||||
scsi_pointer->have_data_in = 0;
|
||||
scsi_pointer->sent_command = 0;
|
||||
|
||||
if(SCpnt->SCp.phase & (resetting|check_condition)) {
|
||||
if (scsi_pointer->phase & (resetting | check_condition)) {
|
||||
if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
|
||||
scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
|
||||
return FAILED;
|
||||
|
@ -939,15 +957,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
|
|||
SCp.phase : current state of the command */
|
||||
|
||||
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
|
||||
SCpnt->SCp.ptr = NULL;
|
||||
SCpnt->SCp.this_residual = 0;
|
||||
scsi_pointer->ptr = NULL;
|
||||
scsi_pointer->this_residual = 0;
|
||||
scsi_set_resid(SCpnt, 0);
|
||||
SCpnt->SCp.buffer = NULL;
|
||||
scsi_pointer->buffer = NULL;
|
||||
} else {
|
||||
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
|
||||
SCpnt->SCp.buffer = scsi_sglist(SCpnt);
|
||||
SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
|
||||
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
|
||||
scsi_pointer->buffer = scsi_sglist(SCpnt);
|
||||
scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
|
||||
scsi_pointer->this_residual = scsi_pointer->buffer->length;
|
||||
}
|
||||
|
||||
DO_LOCK(flags);
|
||||
|
@ -997,7 +1015,7 @@ static void reset_done(struct scsi_cmnd *SCpnt)
|
|||
|
||||
static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
if (SCpnt->SCp.phase & resetting)
|
||||
if (aha152x_scsi_pointer(SCpnt)->phase & resetting)
|
||||
reset_done(SCpnt);
|
||||
else
|
||||
scsi_done(SCpnt);
|
||||
|
@ -1083,7 +1101,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
|
|||
|
||||
DO_LOCK(flags);
|
||||
|
||||
if(SCpnt->SCp.phase & resetted) {
|
||||
if (aha152x_scsi_pointer(SCpnt)->phase & resetted) {
|
||||
HOSTDATA(shpnt)->commands--;
|
||||
if (!HOSTDATA(shpnt)->commands)
|
||||
SETPORT(PORTA, 0);
|
||||
|
@ -1377,28 +1395,31 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
|||
SETPORT(SSTAT1, CLRBUSFREE);
|
||||
|
||||
if(CURRENT_SC) {
|
||||
struct scsi_pointer *scsi_pointer =
|
||||
aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
#if defined(AHA152X_STAT)
|
||||
action++;
|
||||
#endif
|
||||
CURRENT_SC->SCp.phase &= ~syncneg;
|
||||
scsi_pointer->phase &= ~syncneg;
|
||||
|
||||
if(CURRENT_SC->SCp.phase & completed) {
|
||||
if (scsi_pointer->phase & completed) {
|
||||
/* target sent COMMAND COMPLETE */
|
||||
done(shpnt, CURRENT_SC->SCp.Status, DID_OK);
|
||||
done(shpnt, scsi_pointer->Status, DID_OK);
|
||||
|
||||
} else if(CURRENT_SC->SCp.phase & aborted) {
|
||||
done(shpnt, CURRENT_SC->SCp.Status, DID_ABORT);
|
||||
} else if (scsi_pointer->phase & aborted) {
|
||||
done(shpnt, scsi_pointer->Status, DID_ABORT);
|
||||
|
||||
} else if(CURRENT_SC->SCp.phase & resetted) {
|
||||
done(shpnt, CURRENT_SC->SCp.Status, DID_RESET);
|
||||
} else if (scsi_pointer->phase & resetted) {
|
||||
done(shpnt, scsi_pointer->Status, DID_RESET);
|
||||
|
||||
} else if(CURRENT_SC->SCp.phase & disconnected) {
|
||||
} else if (scsi_pointer->phase & disconnected) {
|
||||
/* target sent DISCONNECT */
|
||||
#if defined(AHA152X_STAT)
|
||||
HOSTDATA(shpnt)->disconnections++;
|
||||
#endif
|
||||
append_SC(&DISCONNECTED_SC, CURRENT_SC);
|
||||
CURRENT_SC->SCp.phase |= 1 << 16;
|
||||
scsi_pointer->phase |= 1 << 16;
|
||||
CURRENT_SC = NULL;
|
||||
|
||||
} else {
|
||||
|
@ -1417,23 +1438,24 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
|||
action++;
|
||||
#endif
|
||||
|
||||
if(DONE_SC->SCp.phase & check_condition) {
|
||||
if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) {
|
||||
struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
|
||||
struct aha152x_scdata *sc = SCDATA(cmd);
|
||||
|
||||
scsi_eh_restore_cmnd(cmd, &sc->ses);
|
||||
|
||||
cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
|
||||
aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION;
|
||||
|
||||
HOSTDATA(shpnt)->commands--;
|
||||
if (!HOSTDATA(shpnt)->commands)
|
||||
SETPORT(PORTA, 0); /* turn led off */
|
||||
} else if(DONE_SC->SCp.Status==SAM_STAT_CHECK_CONDITION) {
|
||||
} else if (aha152x_scsi_pointer(DONE_SC)->Status ==
|
||||
SAM_STAT_CHECK_CONDITION) {
|
||||
#if defined(AHA152X_STAT)
|
||||
HOSTDATA(shpnt)->busfree_with_check_condition++;
|
||||
#endif
|
||||
|
||||
if(!(DONE_SC->SCp.phase & not_issued)) {
|
||||
if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) {
|
||||
struct aha152x_scdata *sc;
|
||||
struct scsi_cmnd *ptr = DONE_SC;
|
||||
DONE_SC=NULL;
|
||||
|
@ -1458,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
|||
if (!HOSTDATA(shpnt)->commands)
|
||||
SETPORT(PORTA, 0); /* turn led off */
|
||||
|
||||
if (!(ptr->SCp.phase & resetting)) {
|
||||
if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) {
|
||||
kfree(ptr->host_scribble);
|
||||
ptr->host_scribble=NULL;
|
||||
}
|
||||
|
@ -1481,10 +1503,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
|||
DO_UNLOCK(flags);
|
||||
|
||||
if(CURRENT_SC) {
|
||||
struct scsi_pointer *scsi_pointer =
|
||||
aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
#if defined(AHA152X_STAT)
|
||||
action++;
|
||||
#endif
|
||||
CURRENT_SC->SCp.phase |= selecting;
|
||||
scsi_pointer->phase |= selecting;
|
||||
|
||||
/* clear selection timeout */
|
||||
SETPORT(SSTAT1, SELTO);
|
||||
|
@ -1512,11 +1537,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
|||
*/
|
||||
static void seldo_run(struct Scsi_Host *shpnt)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
SETPORT(SCSISIG, 0);
|
||||
SETPORT(SSTAT1, CLRBUSFREE);
|
||||
SETPORT(SSTAT1, CLRPHASECHG);
|
||||
|
||||
CURRENT_SC->SCp.phase &= ~(selecting|not_issued);
|
||||
scsi_pointer->phase &= ~(selecting | not_issued);
|
||||
|
||||
SETPORT(SCSISEQ, 0);
|
||||
|
||||
|
@ -1531,12 +1558,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
|
|||
|
||||
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
|
||||
|
||||
if (CURRENT_SC->SCp.phase & aborting) {
|
||||
if (scsi_pointer->phase & aborting) {
|
||||
ADDMSGO(ABORT);
|
||||
} else if (CURRENT_SC->SCp.phase & resetting) {
|
||||
} else if (scsi_pointer->phase & resetting) {
|
||||
ADDMSGO(BUS_DEVICE_RESET);
|
||||
} else if (SYNCNEG==0 && SYNCHRONOUS) {
|
||||
CURRENT_SC->SCp.phase |= syncneg;
|
||||
scsi_pointer->phase |= syncneg;
|
||||
MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
|
||||
SYNCNEG=1; /* negotiation in progress */
|
||||
}
|
||||
|
@ -1551,15 +1578,17 @@ static void seldo_run(struct Scsi_Host *shpnt)
|
|||
*/
|
||||
static void selto_run(struct Scsi_Host *shpnt)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
SETPORT(SCSISEQ, 0);
|
||||
SETPORT(SSTAT1, CLRSELTIMO);
|
||||
|
||||
if (!CURRENT_SC)
|
||||
return;
|
||||
|
||||
CURRENT_SC->SCp.phase &= ~selecting;
|
||||
scsi_pointer->phase &= ~selecting;
|
||||
|
||||
if (CURRENT_SC->SCp.phase & aborted)
|
||||
if (scsi_pointer->phase & aborted)
|
||||
done(shpnt, SAM_STAT_GOOD, DID_ABORT);
|
||||
else if (TESTLO(SSTAT0, SELINGO))
|
||||
done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
|
||||
|
@ -1587,7 +1616,10 @@ static void seldi_run(struct Scsi_Host *shpnt)
|
|||
SETPORT(SSTAT1, CLRPHASECHG);
|
||||
|
||||
if(CURRENT_SC) {
|
||||
if(!(CURRENT_SC->SCp.phase & not_issued))
|
||||
struct scsi_pointer *scsi_pointer =
|
||||
aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
if (!(scsi_pointer->phase & not_issued))
|
||||
scmd_printk(KERN_ERR, CURRENT_SC,
|
||||
"command should not have been issued yet\n");
|
||||
|
||||
|
@ -1644,6 +1676,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
|
|||
static void msgi_run(struct Scsi_Host *shpnt)
|
||||
{
|
||||
for(;;) {
|
||||
struct scsi_pointer *scsi_pointer;
|
||||
int sstat1 = GETPORT(SSTAT1);
|
||||
|
||||
if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
|
||||
|
@ -1681,8 +1714,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
|
|||
continue;
|
||||
}
|
||||
|
||||
CURRENT_SC->SCp.Message = MSGI(0);
|
||||
CURRENT_SC->SCp.phase &= ~disconnected;
|
||||
scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
scsi_pointer->Message = MSGI(0);
|
||||
scsi_pointer->phase &= ~disconnected;
|
||||
|
||||
MSGILEN=0;
|
||||
|
||||
|
@ -1690,7 +1724,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
|
|||
continue;
|
||||
}
|
||||
|
||||
CURRENT_SC->SCp.Message = MSGI(0);
|
||||
scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
scsi_pointer->Message = MSGI(0);
|
||||
|
||||
switch (MSGI(0)) {
|
||||
case DISCONNECT:
|
||||
|
@ -1698,11 +1733,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
|
|||
scmd_printk(KERN_WARNING, CURRENT_SC,
|
||||
"target was not allowed to disconnect\n");
|
||||
|
||||
CURRENT_SC->SCp.phase |= disconnected;
|
||||
scsi_pointer->phase |= disconnected;
|
||||
break;
|
||||
|
||||
case COMMAND_COMPLETE:
|
||||
CURRENT_SC->SCp.phase |= completed;
|
||||
scsi_pointer->phase |= completed;
|
||||
break;
|
||||
|
||||
case MESSAGE_REJECT:
|
||||
|
@ -1832,8 +1867,11 @@ static void msgi_end(struct Scsi_Host *shpnt)
|
|||
*/
|
||||
static void msgo_init(struct Scsi_Host *shpnt)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
if(MSGOLEN==0) {
|
||||
if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) {
|
||||
if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 &&
|
||||
SYNCRATE==0) {
|
||||
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
|
||||
} else {
|
||||
scmd_printk(KERN_INFO, CURRENT_SC,
|
||||
|
@ -1850,6 +1888,8 @@ static void msgo_init(struct Scsi_Host *shpnt)
|
|||
*/
|
||||
static void msgo_run(struct Scsi_Host *shpnt)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
while(MSGO_I<MSGOLEN) {
|
||||
if (TESTLO(SSTAT0, SPIORDY))
|
||||
return;
|
||||
|
@ -1861,13 +1901,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
|
|||
|
||||
|
||||
if (MSGO(MSGO_I) & IDENTIFY_BASE)
|
||||
CURRENT_SC->SCp.phase |= identified;
|
||||
scsi_pointer->phase |= identified;
|
||||
|
||||
if (MSGO(MSGO_I)==ABORT)
|
||||
CURRENT_SC->SCp.phase |= aborted;
|
||||
scsi_pointer->phase |= aborted;
|
||||
|
||||
if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
|
||||
CURRENT_SC->SCp.phase |= resetted;
|
||||
scsi_pointer->phase |= resetted;
|
||||
|
||||
SETPORT(SCSIDAT, MSGO(MSGO_I++));
|
||||
}
|
||||
|
@ -1896,7 +1936,7 @@ static void msgo_end(struct Scsi_Host *shpnt)
|
|||
*/
|
||||
static void cmd_init(struct Scsi_Host *shpnt)
|
||||
{
|
||||
if (CURRENT_SC->SCp.sent_command) {
|
||||
if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) {
|
||||
scmd_printk(KERN_ERR, CURRENT_SC,
|
||||
"command already sent\n");
|
||||
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
|
||||
|
@ -1927,7 +1967,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
|
|||
"command sent incompletely (%d/%d)\n",
|
||||
CMD_I, CURRENT_SC->cmd_len);
|
||||
else
|
||||
CURRENT_SC->SCp.sent_command++;
|
||||
aha152x_scsi_pointer(CURRENT_SC)->sent_command++;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1939,7 +1979,7 @@ static void status_run(struct Scsi_Host *shpnt)
|
|||
if (TESTLO(SSTAT0, SPIORDY))
|
||||
return;
|
||||
|
||||
CURRENT_SC->SCp.Status = GETPORT(SCSIDAT);
|
||||
aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT);
|
||||
|
||||
}
|
||||
|
||||
|
@ -1963,6 +2003,7 @@ static void datai_init(struct Scsi_Host *shpnt)
|
|||
|
||||
static void datai_run(struct Scsi_Host *shpnt)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer;
|
||||
unsigned long the_time;
|
||||
int fifodata, data_count;
|
||||
|
||||
|
@ -2000,35 +2041,36 @@ static void datai_run(struct Scsi_Host *shpnt)
|
|||
fifodata = GETPORT(FIFOSTAT);
|
||||
}
|
||||
|
||||
if(CURRENT_SC->SCp.this_residual>0) {
|
||||
while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) {
|
||||
data_count = fifodata > CURRENT_SC->SCp.this_residual ?
|
||||
CURRENT_SC->SCp.this_residual :
|
||||
scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
if (scsi_pointer->this_residual > 0) {
|
||||
while (fifodata > 0 && scsi_pointer->this_residual > 0) {
|
||||
data_count = fifodata > scsi_pointer->this_residual ?
|
||||
scsi_pointer->this_residual :
|
||||
fifodata;
|
||||
fifodata -= data_count;
|
||||
|
||||
if (data_count & 1) {
|
||||
SETPORT(DMACNTRL0, ENDMA|_8BIT);
|
||||
*CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
|
||||
CURRENT_SC->SCp.this_residual--;
|
||||
*scsi_pointer->ptr++ = GETPORT(DATAPORT);
|
||||
scsi_pointer->this_residual--;
|
||||
DATA_LEN++;
|
||||
SETPORT(DMACNTRL0, ENDMA);
|
||||
}
|
||||
|
||||
if (data_count > 1) {
|
||||
data_count >>= 1;
|
||||
insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
|
||||
CURRENT_SC->SCp.ptr += 2 * data_count;
|
||||
CURRENT_SC->SCp.this_residual -= 2 * data_count;
|
||||
insw(DATAPORT, scsi_pointer->ptr, data_count);
|
||||
scsi_pointer->ptr += 2 * data_count;
|
||||
scsi_pointer->this_residual -= 2 * data_count;
|
||||
DATA_LEN += 2 * data_count;
|
||||
}
|
||||
|
||||
if (CURRENT_SC->SCp.this_residual == 0 &&
|
||||
!sg_is_last(CURRENT_SC->SCp.buffer)) {
|
||||
if (scsi_pointer->this_residual == 0 &&
|
||||
!sg_is_last(scsi_pointer->buffer)) {
|
||||
/* advance to next buffer */
|
||||
CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
|
||||
CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
|
||||
CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
|
||||
scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
|
||||
scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
|
||||
scsi_pointer->this_residual = scsi_pointer->buffer->length;
|
||||
}
|
||||
}
|
||||
} else if (fifodata > 0) {
|
||||
|
@ -2096,14 +2138,15 @@ static void datao_init(struct Scsi_Host *shpnt)
|
|||
|
||||
static void datao_run(struct Scsi_Host *shpnt)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
unsigned long the_time;
|
||||
int data_count;
|
||||
|
||||
/* until phase changes or all data sent */
|
||||
while(TESTLO(DMASTAT, INTSTAT) && CURRENT_SC->SCp.this_residual>0) {
|
||||
while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) {
|
||||
data_count = 128;
|
||||
if(data_count > CURRENT_SC->SCp.this_residual)
|
||||
data_count=CURRENT_SC->SCp.this_residual;
|
||||
if (data_count > scsi_pointer->this_residual)
|
||||
data_count = scsi_pointer->this_residual;
|
||||
|
||||
if(TESTLO(DMASTAT, DFIFOEMP)) {
|
||||
scmd_printk(KERN_ERR, CURRENT_SC,
|
||||
|
@ -2114,26 +2157,26 @@ static void datao_run(struct Scsi_Host *shpnt)
|
|||
|
||||
if(data_count & 1) {
|
||||
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
|
||||
SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
|
||||
CURRENT_SC->SCp.this_residual--;
|
||||
SETPORT(DATAPORT, *scsi_pointer->ptr++);
|
||||
scsi_pointer->this_residual--;
|
||||
CMD_INC_RESID(CURRENT_SC, -1);
|
||||
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
|
||||
}
|
||||
|
||||
if(data_count > 1) {
|
||||
data_count >>= 1;
|
||||
outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
|
||||
CURRENT_SC->SCp.ptr += 2 * data_count;
|
||||
CURRENT_SC->SCp.this_residual -= 2 * data_count;
|
||||
outsw(DATAPORT, scsi_pointer->ptr, data_count);
|
||||
scsi_pointer->ptr += 2 * data_count;
|
||||
scsi_pointer->this_residual -= 2 * data_count;
|
||||
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
|
||||
}
|
||||
|
||||
if (CURRENT_SC->SCp.this_residual == 0 &&
|
||||
!sg_is_last(CURRENT_SC->SCp.buffer)) {
|
||||
if (scsi_pointer->this_residual == 0 &&
|
||||
!sg_is_last(scsi_pointer->buffer)) {
|
||||
/* advance to next buffer */
|
||||
CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
|
||||
CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
|
||||
CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
|
||||
scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
|
||||
scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
|
||||
scsi_pointer->this_residual = scsi_pointer->buffer->length;
|
||||
}
|
||||
|
||||
the_time=jiffies + 100*HZ;
|
||||
|
@ -2149,6 +2192,8 @@ static void datao_run(struct Scsi_Host *shpnt)
|
|||
|
||||
static void datao_end(struct Scsi_Host *shpnt)
|
||||
{
|
||||
struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
|
||||
|
||||
if(TESTLO(DMASTAT, DFIFOEMP)) {
|
||||
u32 datao_cnt = GETSTCNT();
|
||||
int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
|
||||
|
@ -2166,9 +2211,9 @@ static void datao_end(struct Scsi_Host *shpnt)
|
|||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
CURRENT_SC->SCp.buffer = sg;
|
||||
CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
|
||||
CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
|
||||
scsi_pointer->buffer = sg;
|
||||
scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done;
|
||||
scsi_pointer->this_residual = scsi_pointer->buffer->length -
|
||||
done;
|
||||
}
|
||||
|
||||
|
@ -2184,6 +2229,7 @@ static void datao_end(struct Scsi_Host *shpnt)
|
|||
*/
|
||||
 static int update_state(struct Scsi_Host *shpnt)
 {
+	struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
 	int dataphase=0;
 	unsigned int stat0 = GETPORT(SSTAT0);
 	unsigned int stat1 = GETPORT(SSTAT1);
@@ -2197,7 +2243,8 @@ static int update_state(struct Scsi_Host *shpnt)
 		SETPORT(SSTAT1,SCSIRSTI);
 	} else if (stat0 & SELDI && PREVSTATE == busfree) {
 		STATE=seldi;
-	} else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) {
+	} else if (stat0 & SELDO && CURRENT_SC &&
+		   (scsi_pointer->phase & selecting)) {
 		STATE=seldo;
 	} else if(stat1 & SELTO) {
 		STATE=selto;
@@ -2329,7 +2376,8 @@ static void is_complete(struct Scsi_Host *shpnt)
 		SETPORT(SXFRCTL0, CH1);
 		SETPORT(DMACNTRL0, 0);
 		if(CURRENT_SC)
-			CURRENT_SC->SCp.phase &= ~spiordy;
+			aha152x_scsi_pointer(CURRENT_SC)->phase &=
+				~spiordy;
 	}
 
 	/*
@@ -2351,7 +2399,8 @@ static void is_complete(struct Scsi_Host *shpnt)
 		SETPORT(DMACNTRL0, 0);
 		SETPORT(SXFRCTL0, CH1|SPIOEN);
 		if(CURRENT_SC)
-			CURRENT_SC->SCp.phase |= spiordy;
+			aha152x_scsi_pointer(CURRENT_SC)->phase |=
+				spiordy;
 	}
 
 	/*
@@ -2441,21 +2490,23 @@ static void disp_enintr(struct Scsi_Host *shpnt)
  */
 static void show_command(struct scsi_cmnd *ptr)
 {
+	const int phase = aha152x_scsi_pointer(ptr)->phase;
+
 	scsi_print_command(ptr);
 	scmd_printk(KERN_DEBUG, ptr,
 		    "request_bufflen=%d; resid=%d; "
 		    "phase |%s%s%s%s%s%s%s%s%s; next=0x%p",
 		    scsi_bufflen(ptr), scsi_get_resid(ptr),
-		    (ptr->SCp.phase & not_issued) ? "not issued|" : "",
-		    (ptr->SCp.phase & selecting) ? "selecting|" : "",
-		    (ptr->SCp.phase & identified) ? "identified|" : "",
-		    (ptr->SCp.phase & disconnected) ? "disconnected|" : "",
-		    (ptr->SCp.phase & completed) ? "completed|" : "",
-		    (ptr->SCp.phase & spiordy) ? "spiordy|" : "",
-		    (ptr->SCp.phase & syncneg) ? "syncneg|" : "",
-		    (ptr->SCp.phase & aborted) ? "aborted|" : "",
-		    (ptr->SCp.phase & resetted) ? "resetted|" : "",
-		    (SCDATA(ptr)) ? SCNEXT(ptr) : NULL);
+		    phase & not_issued ? "not issued|" : "",
+		    phase & selecting ? "selecting|" : "",
+		    phase & identified ? "identified|" : "",
+		    phase & disconnected ? "disconnected|" : "",
+		    phase & completed ? "completed|" : "",
+		    phase & spiordy ? "spiordy|" : "",
+		    phase & syncneg ? "syncneg|" : "",
+		    phase & aborted ? "aborted|" : "",
+		    phase & resetted ? "resetted|" : "",
+		    SCDATA(ptr) ? SCNEXT(ptr) : NULL);
 }
 
 /*
@@ -2487,6 +2538,8 @@ static void show_queues(struct Scsi_Host *shpnt)
 
 static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
 {
+	struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr);
+	const int phase = scsi_pointer->phase;
 	int i;
 
 	seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2496,24 +2549,24 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
 		seq_printf(m, "0x%02x ", ptr->cmnd[i]);
 
 	seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
-		   scsi_get_resid(ptr), ptr->SCp.this_residual,
-		   sg_nents(ptr->SCp.buffer) - 1);
+		   scsi_get_resid(ptr), scsi_pointer->this_residual,
+		   sg_nents(scsi_pointer->buffer) - 1);
 
-	if (ptr->SCp.phase & not_issued)
+	if (phase & not_issued)
 		seq_puts(m, "not issued|");
-	if (ptr->SCp.phase & selecting)
+	if (phase & selecting)
 		seq_puts(m, "selecting|");
-	if (ptr->SCp.phase & disconnected)
+	if (phase & disconnected)
 		seq_puts(m, "disconnected|");
-	if (ptr->SCp.phase & aborted)
+	if (phase & aborted)
 		seq_puts(m, "aborted|");
-	if (ptr->SCp.phase & identified)
+	if (phase & identified)
 		seq_puts(m, "identified|");
-	if (ptr->SCp.phase & completed)
+	if (phase & completed)
 		seq_puts(m, "completed|");
-	if (ptr->SCp.phase & spiordy)
+	if (phase & spiordy)
 		seq_puts(m, "spiordy|");
-	if (ptr->SCp.phase & syncneg)
+	if (phase & syncneg)
 		seq_puts(m, "syncneg|");
 	seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
 }
@@ -2918,6 +2971,7 @@ static struct scsi_host_template aha152x_driver_template = {
 	.sg_tablesize			= SG_ALL,
 	.dma_boundary			= PAGE_SIZE - 1,
 	.slave_alloc			= aha152x_adjust_queue,
+	.cmd_size			= sizeof(struct aha152x_cmd_priv),
 };
 
 #if !defined(AHA152X_PCMCIA)
@@ -3375,13 +3429,11 @@ static int __init aha152x_setup(char *str)
 	setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
 	setup[setup_count].delay       = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
 	setup[setup_count].ext_trans   = ints[0] >= 8 ? ints[8] : 0;
-	if (ints[0] > 8) {                                                /*}*/
+	if (ints[0] > 8)
 		printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
 		       "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
-	} else {
+	else
 		setup_count++;
-		return 0;
-	}
 
 	return 1;
 }
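
[Note: the aha152x hunks above show the pattern that repeats across this series: drivers stop scribbling on the scsi_cmnd::SCp scratchpad and instead reserve per-command private data through the host template's cmd_size field, reached via scsi_cmd_priv(). A minimal sketch of the idiom; demo_cmd_priv, demo_scsi_pointer() and demo_template are illustrative names, not code from any driver in this merge:

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	struct demo_cmd_priv {
		struct scsi_pointer scsi_pointer;	/* what used to live in cmd->SCp */
	};

	static inline struct scsi_pointer *demo_scsi_pointer(struct scsi_cmnd *cmd)
	{
		/* scsi_cmd_priv() points at the cmd_size bytes the midlayer
		 * allocates directly after each struct scsi_cmnd */
		return &((struct demo_cmd_priv *)scsi_cmd_priv(cmd))->scsi_pointer;
	}

	static struct scsi_host_template demo_template = {
		.cmd_size	= sizeof(struct demo_cmd_priv),
	};
]
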
@@ -206,7 +206,6 @@ static int makecode(unsigned hosterr, unsigned scsierr)
 
 static int aha1542_test_port(struct Scsi_Host *sh)
 {
-	u8 inquiry_result[4];
 	int i;
 
 	/* Quick and dirty test for presence of the card. */
@@ -240,7 +239,7 @@ static int aha1542_test_port(struct Scsi_Host *sh)
 	for (i = 0; i < 4; i++) {
 		if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0))
 			return 0;
-		inquiry_result[i] = inb(DATA(sh->io_port));
+		(void)inb(DATA(sh->io_port));
 	}
 
 	/* Reading port should reset DF */
@@ -55,8 +55,12 @@
 #include <asm/dma.h>
 #include <asm/io.h>
 
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include "aha1740.h"
 
 /* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
@@ -283,7 +283,7 @@ main(int argc, char *argv[])
 	/*
 	 * Decend the tree of scopes and insert/emit
 	 * patches as appropriate.  We perform a depth first
-	 * tranversal, recursively handling each scope.
+	 * traversal, recursively handling each scope.
 	 */
 	/* start at the root scope */
 	dump_scope(SLIST_FIRST(&scope_stack));
@@ -60,7 +60,6 @@ void asd_set_dmamode(struct domain_device *dev);
 /* ---------- TMFs ---------- */
 int  asd_abort_task(struct sas_task *);
 int  asd_abort_task_set(struct domain_device *, u8 *lun);
-int  asd_clear_aca(struct domain_device *, u8 *lun);
 int  asd_clear_task_set(struct domain_device *, u8 *lun);
 int  asd_lu_reset(struct domain_device *, u8 *lun);
 int  asd_I_T_nexus_reset(struct domain_device *dev);
@@ -960,7 +960,6 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
 
 	.lldd_abort_task	= asd_abort_task,
 	.lldd_abort_task_set	= asd_abort_task_set,
-	.lldd_clear_aca		= asd_clear_aca,
 	.lldd_clear_task_set	= asd_clear_task_set,
 	.lldd_I_T_nexus_reset	= asd_I_T_nexus_reset,
 	.lldd_lu_reset		= asd_lu_reset,
@@ -322,7 +322,6 @@ Again:
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
-	task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
 		struct completion *completion = ascb->completion;
@@ -532,7 +531,6 @@ int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
 	struct sas_task *t = task;
 	struct asd_ascb *ascb = NULL, *a;
 	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
-	unsigned long flags;
 
 	res = asd_can_queue(asd_ha, 1);
 	if (res)
@@ -575,10 +573,6 @@ int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
 		}
 		if (res)
 			goto out_err_unmap;
-
-		spin_lock_irqsave(&t->task_state_lock, flags);
-		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
 	}
 	list_del_init(&alist);
 
@@ -597,9 +591,6 @@ out_err_unmap:
 			if (a == b)
 				break;
 			t = a->uldd_task;
-			spin_lock_irqsave(&t->task_state_lock, flags);
-			t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
-			spin_unlock_irqrestore(&t->task_state_lock, flags);
 			switch (t->task_proto) {
 			case SAS_PROTOCOL_SATA:
 			case SAS_PROTOCOL_STP:
@@ -287,7 +287,7 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
 	fh = edb->vaddr + 16;
 	ru = edb->vaddr + 16 + sizeof(*fh);
 	res = ru->status;
-	if (ru->datapres == 1)	/* Response data present */
+	if (ru->datapres == SAS_DATAPRES_RESPONSE_DATA)
 		res = ru->resp_data[3];
 #if 0
 	ascb->tag = fh->tag;
@@ -644,15 +644,6 @@ int asd_abort_task_set(struct domain_device *dev, u8 *lun)
 	return res;
 }
 
-int asd_clear_aca(struct domain_device *dev, u8 *lun)
-{
-	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
-
-	if (res == TMF_RESP_FUNC_COMPLETE)
-		asd_clear_nexus_I_T_L(dev, lun);
-	return res;
-}
-
 int asd_clear_task_set(struct domain_device *dev, u8 *lun)
 {
 	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
@@ -126,13 +126,17 @@
 
 #include <asm/ecard.h>
 
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_spi.h>
 #include "acornscsi.h"
 #include "msgqueue.h"
-#include "scsi.h"
+#include "arm_scsi.h"
 
 #include <scsi/scsicam.h>
 
@@ -725,7 +729,7 @@ intr_ret_t acornscsi_kick(AS_Host *host)
 	 */
 	host->scsi.phase = PHASE_CONNECTING;
 	host->SCpnt = SCpnt;
-	host->scsi.SCp = SCpnt->SCp;
+	host->scsi.SCp = *arm_scsi_pointer(SCpnt);
 	host->dma.xfer_setup = 0;
 	host->dma.xfer_required = 0;
 	host->dma.xfer_done = 0;
@@ -1420,6 +1424,7 @@ unsigned char acornscsi_readmessagebyte(AS_Host *host)
 static
 void acornscsi_message(AS_Host *host)
 {
+	struct scsi_pointer *scsi_pointer;
 	unsigned char message[16];
 	unsigned int msgidx = 0, msglen = 1;
 
@@ -1489,8 +1494,9 @@ void acornscsi_message(AS_Host *host)
 		 * the saved data pointer for the current I/O process.
 		 */
 		acornscsi_dma_cleanup(host);
-		host->SCpnt->SCp = host->scsi.SCp;
-		host->SCpnt->SCp.sent_command = 0;
+		scsi_pointer = arm_scsi_pointer(host->SCpnt);
+		*scsi_pointer = host->scsi.SCp;
+		scsi_pointer->sent_command = 0;
 		host->scsi.phase = PHASE_MSGIN;
 		break;
 
@@ -1505,7 +1511,7 @@ void acornscsi_message(AS_Host *host)
 		 * the present command and status areas.'
 		 */
 		acornscsi_dma_cleanup(host);
-		host->scsi.SCp = host->SCpnt->SCp;
+		host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
 		host->scsi.phase = PHASE_MSGIN;
 		break;
 
@@ -1805,7 +1811,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
 	/*
	 * Restore data pointer from SAVED pointers.
	 */
-	host->scsi.SCp = host->SCpnt->SCp;
+	host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
 	printk(", data pointers: [%p, %X]",
 	       host->scsi.SCp.ptr, host->scsi.SCp.this_residual);
@@ -2404,6 +2410,7 @@ acornscsi_intr(int irq, void *dev_id)
  */
 static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
 {
+	struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
 	void (*done)(struct scsi_cmnd *) = scsi_done;
 	AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
 
@@ -2419,9 +2426,9 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
 
 	SCpnt->host_scribble = NULL;
 	SCpnt->result = 0;
-	SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
-	SCpnt->SCp.sent_command = 0;
-	SCpnt->SCp.scsi_xferred = 0;
+	scsi_pointer->phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
+	scsi_pointer->sent_command = 0;
+	scsi_pointer->scsi_xferred = 0;
 
 	init_SCp(SCpnt);
 
@@ -2787,6 +2794,7 @@ static struct scsi_host_template acornscsi_template = {
 	.cmd_per_lun		= 2,
 	.dma_boundary		= PAGE_SIZE - 1,
 	.proc_name		= "acornscsi",
+	.cmd_size		= sizeof(struct arm_cmd_priv),
 };
 
 static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
@@ -1,16 +1,25 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/drivers/acorn/scsi/scsi.h
  *
  *  Copyright (C) 2002 Russell King
  *
- *  Commonly used scsi driver functions.
+ *  Commonly used functions by the ARM SCSI-II drivers.
  */
 
 #include <linux/scatterlist.h>
 
 #define BELT_AND_BRACES
 
+struct arm_cmd_priv {
+	struct scsi_pointer scsi_pointer;
+};
+
+static inline struct scsi_pointer *arm_scsi_pointer(struct scsi_cmnd *cmd)
+{
+	struct arm_cmd_priv *acmd = scsi_cmd_priv(cmd);
+
+	return &acmd->scsi_pointer;
+}
+
 /*
  * The scatter-gather list handling.  This contains all
  * the yucky stuff that needs to be fixed properly.
@@ -78,16 +87,18 @@ static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c)
 
 static inline void init_SCp(struct scsi_cmnd *SCpnt)
 {
-	memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
+	struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
+	memset(scsi_pointer, 0, sizeof(struct scsi_pointer));
 
 	if (scsi_bufflen(SCpnt)) {
 		unsigned long len = 0;
 
-		SCpnt->SCp.buffer = scsi_sglist(SCpnt);
-		SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
-		SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
-		SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
-		SCpnt->SCp.phase = scsi_bufflen(SCpnt);
+		scsi_pointer->buffer = scsi_sglist(SCpnt);
+		scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1;
+		scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
+		scsi_pointer->this_residual = scsi_pointer->buffer->length;
+		scsi_pointer->phase = scsi_bufflen(SCpnt);
 
 #ifdef BELT_AND_BRACES
 		{ /*
@@ -111,15 +122,15 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
 				 * FIXME: Totaly naive fixup. We should abort
 				 * with error
 				 */
-				SCpnt->SCp.phase =
+				scsi_pointer->phase =
 					min_t(unsigned long, len,
 					      scsi_bufflen(SCpnt));
 			}
 		}
 #endif
 	} else {
-		SCpnt->SCp.ptr = NULL;
-		SCpnt->SCp.this_residual = 0;
-		SCpnt->SCp.phase = 0;
+		scsi_pointer->ptr = NULL;
+		scsi_pointer->this_residual = 0;
+		scsi_pointer->phase = 0;
 	}
 }
@@ -35,8 +35,12 @@
 #include <asm/io.h>
 #include <asm/ecard.h>
 
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include "fas216.h"
 
 struct arxescsi_info {
@@ -223,7 +223,7 @@ static struct scsi_host_template cumanascsi_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
 	.proc_name		= "CumanaSCSI-1",
-	.cmd_size		= NCR5380_CMD_SIZE,
+	.cmd_size		= sizeof(struct NCR5380_cmd),
 	.max_sectors		= 128,
 	.dma_boundary		= PAGE_SIZE - 1,
 };
@@ -29,10 +29,14 @@
 #include <asm/ecard.h>
 #include <asm/io.h>
 
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
 
 #include <scsi/scsicam.h>
 
@@ -35,10 +35,14 @@
 #include <asm/dma.h>
 #include <asm/ecard.h>
 
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
 
 #include <scsi/scsicam.h>
 
@@ -47,11 +47,15 @@
 #include <asm/irq.h>
 #include <asm/ecard.h>
 
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
 
 /* NOTE: SCSI2 Synchronous transfers *require* DMA according to
  *  the data sheet.  This restriction is crazy, especially when
@@ -757,7 +761,7 @@ static void fas216_transfer(FAS216_Info *info)
 		fas216_log(info, LOG_ERROR, "null buffer passed to "
 			   "fas216_starttransfer");
 		print_SCp(&info->scsi.SCp, "SCp: ", "\n");
-		print_SCp(&info->SCpnt->SCp, "Cmnd SCp: ", "\n");
+		print_SCp(arm_scsi_pointer(info->SCpnt), "Cmnd SCp: ", "\n");
 		return;
 	}
 
@@ -1007,7 +1011,7 @@ fas216_reselected_intr(FAS216_Info *info)
 	/*
	 * Restore data pointer from SAVED data pointer
	 */
-	info->scsi.SCp = info->SCpnt->SCp;
+	info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
 
 	fas216_log(info, LOG_CONNECT, "data pointers: [%p, %X]",
 		   info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1050,6 +1054,7 @@ fas216_reselected_intr(FAS216_Info *info)
 
 static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int msglen)
 {
+	struct scsi_pointer *scsi_pointer;
 	int i;
 
 	switch (message[0]) {
@@ -1074,8 +1079,9 @@ static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int
 		 * as required by the SCSI II standard.  These always
 		 * point to the start of their respective areas.
 		 */
-		info->SCpnt->SCp = info->scsi.SCp;
-		info->SCpnt->SCp.sent_command = 0;
+		scsi_pointer = arm_scsi_pointer(info->SCpnt);
+		*scsi_pointer = info->scsi.SCp;
+		scsi_pointer->sent_command = 0;
 		fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
 			"save data pointers: [%p, %X]",
 			info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1088,7 +1094,7 @@ static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int
 		/*
		 * Restore current data pointer from SAVED data pointer
		 */
-		info->scsi.SCp = info->SCpnt->SCp;
+		info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
 		fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
 			"restore data pointers: [%p, 0x%x]",
 			info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1766,7 +1772,7 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 	 * claim host busy
	 */
 	info->scsi.phase = PHASE_SELECTION;
-	info->scsi.SCp = SCpnt->SCp;
+	info->scsi.SCp = *arm_scsi_pointer(SCpnt);
 	info->SCpnt = SCpnt;
 	info->dma.transfer_type = fasdma_none;
 
@@ -1845,7 +1851,7 @@ static void fas216_do_bus_device_reset(FAS216_Info *info,
 	 * claim host busy
	 */
 	info->scsi.phase = PHASE_SELECTION;
-	info->scsi.SCp = SCpnt->SCp;
+	info->scsi.SCp = *arm_scsi_pointer(SCpnt);
 	info->SCpnt = SCpnt;
 	info->dma.transfer_type = fasdma_none;
 
@@ -1995,11 +2001,13 @@ static void fas216_devicereset_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
 static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
 			       unsigned int result)
 {
+	struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
 	fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
 		   "request sense complete, result=0x%04x%02x%02x",
-		   result, SCpnt->SCp.Message, SCpnt->SCp.Status);
+		   result, scsi_pointer->Message, scsi_pointer->Status);
 
-	if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD)
+	if (result != DID_OK || scsi_pointer->Status != SAM_STAT_GOOD)
 		/*
		 * Something went wrong.  Make sure that we don't
		 * have valid data in the sense buffer that could
@@ -2029,6 +2037,8 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
 static void
 fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
 {
+	struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
 	info->stats.fins += 1;
 
 	set_host_byte(SCpnt, result);
@@ -2103,8 +2113,8 @@ request_sense:
 	fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
			  "requesting sense");
 	init_SCp(SCpnt);
-	SCpnt->SCp.Message = 0;
-	SCpnt->SCp.Status = 0;
+	scsi_pointer->Message = 0;
+	scsi_pointer->Status = 0;
 	SCpnt->host_scribble = (void *)fas216_rq_sns_done;
 
 	/*
@@ -312,6 +312,10 @@ typedef struct {
 
 /* driver-private data per SCSI command. */
 struct fas216_cmd_priv {
+	/*
+	 * @scsi_pointer must be the first member. See also arm_scsi_pointer().
+	 */
+	struct scsi_pointer scsi_pointer;
 	void (*scsi_done)(struct scsi_cmnd *cmd);
 };
 
@@ -113,7 +113,7 @@ static struct scsi_host_template oakscsi_template = {
 	.cmd_per_lun		= 2,
 	.dma_boundary		= PAGE_SIZE - 1,
 	.proc_name		= "oakscsi",
-	.cmd_size		= NCR5380_CMD_SIZE,
+	.cmd_size		= sizeof(struct NCR5380_cmd),
 	.max_sectors		= 128,
 };
 
@@ -20,10 +20,14 @@
 #include <asm/ecard.h>
 #include <asm/io.h>
 
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
 
 #include <scsi/scsicam.h>
 
@@ -20,7 +20,11 @@
 #include <linux/list.h>
 #include <linux/init.h>
 
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
 
 #define DEBUG
 
@@ -538,7 +538,7 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
 static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
                                    struct scsi_cmnd *cmd)
 {
-	int wanted_len = cmd->SCp.this_residual;
+	int wanted_len = NCR5380_to_ncmd(cmd)->this_residual;
 	int possible_len, limit;
 
 	if (wanted_len < DMA_MIN_SIZE)
@@ -610,7 +610,7 @@ static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
 	}
 
 	/* Last step: apply the hard limit on DMA transfers */
-	limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ?
+	limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(NCR5380_to_ncmd(cmd)->ptr))) ?
 		    STRAM_BUFFER_SIZE : 255*512;
 	if (possible_len > limit)
 		possible_len = limit;
@@ -711,7 +711,7 @@ static struct scsi_host_template atari_scsi_template = {
 	.this_id		= 7,
 	.cmd_per_lun		= 2,
 	.dma_boundary		= PAGE_SIZE - 1,
-	.cmd_size		= NCR5380_CMD_SIZE,
+	.cmd_size		= sizeof(struct NCR5380_cmd),
 };
 
 static int __init atari_scsi_probe(struct platform_device *pdev)
@@ -218,7 +218,7 @@ static char const *cqe_desc[] = {
 
 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
 {
-	struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
+	struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
 	struct iscsi_cls_session *cls_session;
 	struct beiscsi_io_task *abrt_io_task;
 	struct beiscsi_conn *beiscsi_conn;
@@ -403,6 +403,7 @@ static struct scsi_host_template beiscsi_sht = {
 	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
 	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
 	.track_queue_depth = 1,
+	.cmd_size = sizeof(struct iscsi_cmd),
 };
 
 static struct scsi_transport_template *beiscsi_scsi_transport;
@@ -711,7 +711,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
 	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
 
 	bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
-	return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
+	return sysfs_emit(buf, "%s\n", serial_num);
 }
 
 static ssize_t
@@ -725,7 +725,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
 	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 
 	bfa_get_adapter_model(&bfad->bfa, model);
-	return snprintf(buf, PAGE_SIZE, "%s\n", model);
+	return sysfs_emit(buf, "%s\n", model);
 }
 
 static ssize_t
@@ -805,7 +805,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Invalid Model");
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
+	return sysfs_emit(buf, "%s\n", model_descr);
 }
 
 static ssize_t
@@ -819,7 +819,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
 	u64 nwwn;
 
 	nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
+	return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn));
 }
 
 static ssize_t
@@ -836,7 +836,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
 	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
 	strlcpy(symname, port_attr.port_cfg.sym_name.symname,
			BFA_SYMNAME_MAXLEN);
-	return snprintf(buf, PAGE_SIZE, "%s\n", symname);
+	return sysfs_emit(buf, "%s\n", symname);
 }
 
 static ssize_t
@@ -850,14 +850,14 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
 	char hw_ver[BFA_VERSION_LEN];
 
 	bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
-	return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
+	return sysfs_emit(buf, "%s\n", hw_ver);
 }
 
 static ssize_t
 bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
				char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
+	return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION);
 }
 
 static ssize_t
@@ -871,7 +871,7 @@ bfad_im_optionrom_version_show(struct device *dev,
 	char optrom_ver[BFA_VERSION_LEN];
 
 	bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
-	return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
+	return sysfs_emit(buf, "%s\n", optrom_ver);
 }
 
 static ssize_t
@@ -885,7 +885,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
 	char fw_ver[BFA_VERSION_LEN];
 
 	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
-	return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
+	return sysfs_emit(buf, "%s\n", fw_ver);
 }
 
 static ssize_t
@@ -897,7 +897,7 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return sysfs_emit(buf, "%d\n",
 			bfa_get_nports(&bfad->bfa));
 }
 
@@ -905,7 +905,7 @@ static ssize_t
 bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
				char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
+	return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME);
 }
 
 static ssize_t
@@ -924,14 +924,14 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
 	rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
			 GFP_ATOMIC);
 	if (rports == NULL)
-		return snprintf(buf, PAGE_SIZE, "Failed\n");
+		return sysfs_emit(buf, "Failed\n");
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	kfree(rports);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
+	return sysfs_emit(buf, "%d\n", nrports);
 }
 
 static DEVICE_ATTR(serial_number, S_IRUGO,
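
[Note: the bfad_attr.c hunks above all make the same substitution: sysfs_emit() knows a sysfs show() buffer is a full page and does the bounds checking itself, so the PAGE_SIZE argument that snprintf() needed disappears. A hedged one-liner of the new form; demo_version_show is a hypothetical attribute callback, not one of the driver's:

	static ssize_t demo_version_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%s\n", "1.0");
	}
]
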
@@ -150,10 +150,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
 	wait_queue_head_t *wq;
 
-	cmnd->SCp.Status |= tsk_status << 1;
-	set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
-	wq = (wait_queue_head_t *) cmnd->SCp.ptr;
-	cmnd->SCp.ptr = NULL;
+	bfad_priv(cmnd)->status |= tsk_status << 1;
+	set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status);
+	wq = bfad_priv(cmnd)->wq;
+	bfad_priv(cmnd)->wq = NULL;
 
 	if (wq)
 		wake_up(wq);
@@ -259,7 +259,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
 	 * happens.
	 */
 	cmnd->host_scribble = NULL;
-	cmnd->SCp.Status = 0;
+	bfad_priv(cmnd)->status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
 	/*
	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
@@ -326,8 +326,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 	 * if happens.
	 */
 	cmnd->host_scribble = NULL;
-	cmnd->SCp.ptr = (char *)&wq;
-	cmnd->SCp.Status = 0;
+	bfad_priv(cmnd)->wq = &wq;
+	bfad_priv(cmnd)->status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
 	/*
	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
@@ -347,10 +347,9 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
			  FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	wait_event(wq, test_bit(IO_DONE_BIT,
-			(unsigned long *)&cmnd->SCp.Status));
+	wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status));
 
-	task_status = cmnd->SCp.Status >> 1;
+	task_status = bfad_priv(cmnd)->status >> 1;
 	if (task_status != BFI_TSKIM_STS_OK) {
 		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"LUN reset failure, status: %d\n", task_status);
@@ -381,16 +380,16 @@ bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim) {
-		cmnd->SCp.ptr = (char *)&wq;
+		bfad_priv(cmnd)->wq = &wq;
 		rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
 		if (rc == BFA_STATUS_OK) {
 			/* wait target reset to complete */
 			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 			wait_event(wq, test_bit(IO_DONE_BIT,
-					(unsigned long *)&cmnd->SCp.Status));
+						&bfad_priv(cmnd)->status));
 			spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-			task_status = cmnd->SCp.Status >> 1;
+			task_status = bfad_priv(cmnd)->status >> 1;
 			if (task_status != BFI_TSKIM_STS_OK)
 				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
					"target reset failure,"
@@ -797,6 +796,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
 	.name = BFAD_DRIVER_NAME,
 	.info = bfad_im_info,
 	.queuecommand = bfad_im_queuecommand,
+	.cmd_size = sizeof(struct bfad_cmd_priv),
 	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
@@ -819,6 +819,7 @@ struct scsi_host_template bfad_im_vport_template = {
 	.name = BFAD_DRIVER_NAME,
 	.info = bfad_im_info,
 	.queuecommand = bfad_im_queuecommand,
+	.cmd_size = sizeof(struct bfad_cmd_priv),
 	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
@@ -43,6 +43,22 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa);
  */
 #define IO_DONE_BIT			0
 
+/**
+ * struct bfad_cmd_priv - private data per SCSI command.
+ * @status: Lowest bit represents IO_DONE. The next seven bits hold a value of
+ *	type enum bfi_tskim_status.
+ * @wq: Wait queue used to wait for completion of an operation.
+ */
+struct bfad_cmd_priv {
+	unsigned long status;
+	wait_queue_head_t *wq;
+};
+
+static inline struct bfad_cmd_priv *bfad_priv(struct scsi_cmnd *cmd)
+{
+	return scsi_cmd_priv(cmd);
+}
+
 struct bfad_itnim_data_s {
 	struct bfad_itnim_s *itnim;
 };
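
[Note: per the kernel-doc just added to this header, bit 0 of bfad_cmd_priv::status is the IO_DONE flag and bits 1..7 carry the bfi_tskim_status value, which is why the bfad_im.c callers above shift by one in both directions. A sketch of the handshake under those assumptions; demo_complete() and demo_wait() are hypothetical helpers, not driver code:

	static void demo_complete(struct scsi_cmnd *cmnd, unsigned long tsk_status)
	{
		bfad_priv(cmnd)->status |= tsk_status << 1;	/* status above bit 0 */
		set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status);	/* done flag in bit 0 */
		wake_up(bfad_priv(cmnd)->wq);
	}

	static unsigned long demo_wait(struct scsi_cmnd *cmnd, wait_queue_head_t *wq)
	{
		wait_event(*wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status));
		return bfad_priv(cmnd)->status >> 1;		/* strip the done bit */
	}
]
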
@@ -137,8 +137,6 @@
 #define BNX2FC_FW_TIMEOUT		(3 * HZ)
 #define PORT_MAX			2
 
-#define CMD_SCSI_STATUS(Cmnd)		((Cmnd)->SCp.Status)
-
 /* FC FCP Status */
 #define	FC_GOOD				0
 
@@ -493,7 +491,14 @@ struct bnx2fc_unsol_els {
 	struct work_struct unsol_els_work;
 };
 
+struct bnx2fc_priv {
+	struct bnx2fc_cmd *io_req;
+};
+
+static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd)
+{
+	return scsi_cmd_priv(cmd);
+}
+
 struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
 struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
 
@@ -2718,14 +2718,13 @@ static int __init bnx2fc_mod_init(void)
 
 	bg = &bnx2fc_global;
 	skb_queue_head_init(&bg->fcoe_rx_list);
-	l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
-				   (void *)bg,
-				   "bnx2fc_l2_thread");
+	l2_thread = kthread_run(bnx2fc_l2_rcv_thread,
+				(void *)bg,
+				"bnx2fc_l2_thread");
 	if (IS_ERR(l2_thread)) {
 		rc = PTR_ERR(l2_thread);
 		goto free_wq;
 	}
-	wake_up_process(l2_thread);
 	spin_lock_bh(&bg->fcoe_rx_list.lock);
 	bg->kthread = l2_thread;
 	spin_unlock_bh(&bg->fcoe_rx_list.lock);
@@ -2975,6 +2974,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
 	.track_queue_depth	= 1,
 	.slave_configure	= bnx2fc_slave_configure,
 	.shost_groups		= bnx2fc_host_groups,
+	.cmd_size		= sizeof(struct bnx2fc_priv),
 };
 
 static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
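
[Note: the bnx2fc_mod_init() hunk works because kthread_run() is kthread_create() followed by an immediate wake_up_process(), so the explicit wake-up became redundant. The macro, lightly trimmed from include/linux/kthread.h:

	#define kthread_run(threadfn, data, namefmt, ...)			   \
	({									   \
		struct task_struct *__k						   \
			= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
		if (!IS_ERR(__k))						   \
			wake_up_process(__k);					   \
		__k;								   \
	})
]
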
@@ -204,7 +204,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
 		sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
 		sc_cmd->allowed);
 	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
-	sc_cmd->SCp.ptr = NULL;
+	bnx2fc_priv(sc_cmd)->io_req = NULL;
 	scsi_done(sc_cmd);
 }
 
@@ -765,7 +765,7 @@ retry_tmf:
 	task = &(task_page[index]);
 	bnx2fc_init_mp_task(io_req, task);
 
-	sc_cmd->SCp.ptr = (char *)io_req;
+	bnx2fc_priv(sc_cmd)->io_req = io_req;
 
 	/* Obtain free SQ entry */
 	spin_lock_bh(&tgt->tgt_lock);
@@ -1147,7 +1147,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
 
 	spin_lock_bh(&tgt->tgt_lock);
-	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+	io_req = bnx2fc_priv(sc_cmd)->io_req;
 	if (!io_req) {
 		/* Command might have just completed */
 		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
@@ -1572,8 +1572,8 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
 		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
			fc_hdr->fh_r_ctl);
 	}
-	if (!sc_cmd->SCp.ptr) {
-		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
+	if (!bnx2fc_priv(sc_cmd)->io_req) {
+		printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
 		return;
 	}
 	switch (io_req->fcp_status) {
@@ -1609,7 +1609,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
 		return;
 	}
 
-	sc_cmd->SCp.ptr = NULL;
+	bnx2fc_priv(sc_cmd)->io_req = NULL;
 	scsi_done(sc_cmd);
 
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
@@ -1773,8 +1773,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 	io_req->fcp_resid = fcp_rsp->fcp_resid;
 
 	io_req->scsi_comp_flags = rsp_flags;
-	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
-				  fcp_rsp->scsi_status_code;
+	io_req->cdb_status = fcp_rsp->scsi_status_code;
 
 	/* Fetch fcp_rsp_info and fcp_sns_info if available */
 	if (num_rq) {
@@ -1946,8 +1945,8 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 	/* parse fcp_rsp and obtain sense data from RQ if available */
 	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
 
-	if (!sc_cmd->SCp.ptr) {
-		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+	if (!bnx2fc_priv(sc_cmd)->io_req) {
+		printk(KERN_ERR PFX "io_req is NULL\n");
 		return;
 	}
 
@@ -2018,7 +2017,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 			io_req->fcp_status);
 		break;
 	}
-	sc_cmd->SCp.ptr = NULL;
+	bnx2fc_priv(sc_cmd)->io_req = NULL;
 	scsi_done(sc_cmd);
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
 }
@@ -2044,7 +2043,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
 	io_req->port = port;
 	io_req->tgt = tgt;
 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
-	sc_cmd->SCp.ptr = (char *)io_req;
+	bnx2fc_priv(sc_cmd)->io_req = io_req;
 
 	stats = per_cpu_ptr(lport->stats, get_cpu());
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
@@ -482,7 +482,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
 		}
 
 		/*
-		 * Offlaod process is protected with hba mutex.
+		 * Offload process is protected with hba mutex.
 		 * Use the same mutex_lock for upload process too
 		 */
 		mutex_lock(&hba->hba_mutex);
@@ -2268,6 +2268,7 @@ static struct scsi_host_template bnx2i_host_template = {
 	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
 	.shost_groups		= bnx2i_dev_groups,
 	.track_queue_depth	= 1,
+	.cmd_size		= sizeof(struct iscsi_cmd),
 };
 
 struct iscsi_transport bnx2i_iscsi_transport = {
@@ -166,7 +166,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 
 	/* Check for Task Management */
-	if (likely(scmnd->SCp.Message == 0)) {
+	if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
 		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
 		fcp_cmnd->fc_tm_flags = 0;
 		fcp_cmnd->fc_cmdref = 0;
@@ -185,7 +185,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
 	} else {
 		memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
 		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
-		fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+		fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
 	}
 }
 
@@ -1855,7 +1855,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
 
 	/* Needed during abort */
 	cmnd->host_scribble = (unsigned char *)ioreq;
-	cmnd->SCp.Message = 0;
+	csio_priv(cmnd)->fc_tm_flags = 0;
 
 	/* Kick off SCSI IO SM on the ioreq */
 	spin_lock_irqsave(&hw->lock, flags);
@@ -2026,7 +2026,7 @@ csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
 		  req, req->wr_status);
 
 	/* Cache FW return status */
-	cmnd->SCp.Status = req->wr_status;
+	csio_priv(cmnd)->wr_status = req->wr_status;
 
 	/* Special handling based on FCP response */
 
@@ -2049,7 +2049,7 @@ csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
 		/* Modify return status if flags indicate success */
 		if (flags & FCP_RSP_LEN_VAL)
 			if (rsp_info->rsp_code == FCP_TMF_CMPL)
-				cmnd->SCp.Status = FW_SUCCESS;
+				csio_priv(cmnd)->wr_status = FW_SUCCESS;
 
 		csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
 	}
@@ -2125,9 +2125,9 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
 
 	csio_scsi_cmnd(ioreq)	 = cmnd;
 	cmnd->host_scribble	 = (unsigned char *)ioreq;
-	cmnd->SCp.Status	 = 0;
+	csio_priv(cmnd)->wr_status = 0;
 
-	cmnd->SCp.Message	 = FCP_TMF_LUN_RESET;
+	csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
 	ioreq->tmo		 = CSIO_SCSI_LUNRST_TMO_MS / 1000;
 
 	/*
@@ -2178,9 +2178,10 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
 	}
 
 	/* LUN reset returned, check cached status */
-	if (cmnd->SCp.Status != FW_SUCCESS) {
+	if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
 		csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
-			 cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+			 cmnd->device->id, cmnd->device->lun,
+			 csio_priv(cmnd)->wr_status);
 		goto fail;
 	}
 
@@ -2271,6 +2272,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
 	.name			= CSIO_DRV_DESC,
 	.proc_name		= KBUILD_MODNAME,
 	.queuecommand		= csio_queuecommand,
+	.cmd_size		= sizeof(struct csio_cmd_priv),
 	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= csio_eh_abort_handler,
 	.eh_device_reset_handler = csio_eh_lun_reset_handler,
@@ -188,6 +188,16 @@ struct csio_scsi_level_data {
 	uint64_t	oslun;
 };
 
+struct csio_cmd_priv {
+	uint8_t fc_tm_flags;	/* task management flags */
+	uint16_t wr_status;
+};
+
+static inline struct csio_cmd_priv *csio_priv(struct scsi_cmnd *cmd)
+{
+	return scsi_cmd_priv(cmd);
+}
+
 static inline struct csio_ioreq *
 csio_get_scsi_ioreq(struct csio_scsim *scm)
 {
@@ -98,6 +98,7 @@ static struct scsi_host_template cxgb3i_host_template = {
 	.dma_boundary	= PAGE_SIZE - 1,
 	.this_id	= -1,
 	.track_queue_depth = 1,
+	.cmd_size	= sizeof(struct iscsi_cmd),
 };
 
 static struct iscsi_transport cxgb3i_iscsi_transport = {
@@ -116,6 +116,7 @@ static struct scsi_host_template cxgb4i_host_template = {
 	.dma_boundary	= PAGE_SIZE - 1,
 	.this_id	= -1,
 	.track_queue_depth = 1,
+	.cmd_size	= sizeof(struct iscsi_cmd),
 };
 
 static struct iscsi_transport cxgb4i_iscsi_transport = {
@@ -430,8 +430,8 @@ static int write_same16(struct scsi_device *sdev,
 	struct device *dev = &cfg->dev->dev;
 	const u32 s = ilog2(sdev->sector_size) - 9;
 	const u32 to = sdev->request_queue->rq_timeout;
-	const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
-						       REQ_OP_WRITE_SAME) >> s;
+	const u32 ws_limit =
+		sdev->request_queue->limits.max_write_zeroes_sectors >> s;
 
 	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
 	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
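
[Note: with REQ_OP_WRITE_SAME removed from the block layer (the "high blast radius" change in this pull), the cxlflash hunk above sizes its WRITE SAME(16) payload from the queue's write-zeroes limit instead of the old write-same limit. A hedged sketch of the substituted computation; demo_ws_limit is a hypothetical helper, with s the log2 sector-size shift as in the hunk:

	static u32 demo_ws_limit(struct scsi_device *sdev, u32 s)
	{
		return sdev->request_queue->limits.max_write_zeroes_sectors >> s;
	}
]
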
@@ -3314,9 +3314,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
 
 	/* Here is the info for Doug Gilbert's sg3 ... */
 	scsi_set_resid(cmd, srb->total_xfer_length);
-	/* This may be interpreted by sb. or not ... */
-	cmd->SCp.this_residual = srb->total_xfer_length;
-	cmd->SCp.buffers_residual = 0;
 	if (debug_enabled(DBG_KG)) {
 		if (srb->total_xfer_length)
 			dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
@@ -52,7 +52,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
 	.dma_boundary		= PAGE_SIZE - 1,
-	.cmd_size		= NCR5380_CMD_SIZE,
+	.cmd_size		= sizeof(struct NCR5380_cmd),
 };
 
 static int dmx3191d_probe_one(struct pci_dev *pdev,
@@ -4127,7 +4127,7 @@ sli_calc_max_qentries(struct sli4 *sli4)
 			   sli4->qinfo.count_mask[q]);
 	}
 
-	/* single, continguous DMA allocations will be called for each queue
+	/* single, contiguous DMA allocations will be called for each queue
	 * of size (max_qentries * queue entry size); since these can be large,
	 * check against the OS max DMA allocation size
	 */
@@ -2678,6 +2678,7 @@ struct scsi_host_template scsi_esp_template = {
 	.sg_tablesize		= SG_ALL,
 	.max_sectors		= 0xffff,
 	.skip_settle_delay	= 1,
+	.cmd_size		= sizeof(struct esp_cmd_priv),
 };
 EXPORT_SYMBOL(scsi_esp_template);
 
@@ -2739,9 +2740,6 @@ static struct spi_function_template esp_transport_ops = {
 
 static int __init esp_init(void)
 {
-	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
-		     sizeof(struct esp_cmd_priv));
-
 	esp_transport_template = spi_attach_transport(&esp_transport_ops);
 	if (!esp_transport_template)
 		return -ENODEV;
@@ -262,7 +262,8 @@ struct esp_cmd_priv {
 	struct scatterlist *cur_sg;
 	int tot_residue;
 };
-#define ESP_CMD_PRIV(CMD)	((struct esp_cmd_priv *)(&(CMD)->SCp))
+
+#define ESP_CMD_PRIV(cmd)	((struct esp_cmd_priv *)scsi_cmd_priv(cmd))
 
 /* NOTE: this enum is ordered based on chip features! */
 enum esp_rev {
@@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = {
 	.sg_tablesize = SG_ALL,
 	.max_sectors = 0xffff,
 	.track_queue_depth = 1,
+	.cmd_size = sizeof(struct libfc_cmd_priv),
 };
 
 /**
@@ -115,6 +115,11 @@ struct fdomain {
 	struct work_struct work;
 };
 
+static struct scsi_pointer *fdomain_scsi_pointer(struct scsi_cmnd *cmd)
+{
+	return scsi_cmd_priv(cmd);
+}
+
 static inline void fdomain_make_bus_idle(struct fdomain *fd)
 {
 	outb(0, fd->base + REG_BCTL);
@@ -263,20 +268,21 @@ static void fdomain_work(struct work_struct *work)
 	struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host,
					    hostdata);
 	struct scsi_cmnd *cmd = fd->cur_cmd;
+	struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
 	unsigned long flags;
 	int status;
 	int done = 0;
 
 	spin_lock_irqsave(sh->host_lock, flags);
 
-	if (cmd->SCp.phase & in_arbitration) {
+	if (scsi_pointer->phase & in_arbitration) {
 		status = inb(fd->base + REG_ASTAT);
 		if (!(status & ASTAT_ARB)) {
 			set_host_byte(cmd, DID_BUS_BUSY);
 			fdomain_finish_cmd(fd);
 			goto out;
 		}
-		cmd->SCp.phase = in_selection;
+		scsi_pointer->phase = in_selection;
 
 		outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL);
 		outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
@@ -285,7 +291,7 @@ static void fdomain_work(struct work_struct *work)
 		/* Stop arbitration and enable parity */
 		outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
 		goto out;
-	} else if (cmd->SCp.phase & in_selection) {
+	} else if (scsi_pointer->phase & in_selection) {
 		status = inb(fd->base + REG_BSTAT);
 		if (!(status & BSTAT_BSY)) {
 			/* Try again, for slow devices */
@@ -297,75 +303,75 @@ static void fdomain_work(struct work_struct *work)
 			/* Stop arbitration and enable parity */
 			outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
 		}
-		cmd->SCp.phase = in_other;
+		scsi_pointer->phase = in_other;
 		outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL);
 		outb(BCTL_BUSEN, fd->base + REG_BCTL);
 		goto out;
 	}
 
-	/* cur_cmd->SCp.phase == in_other: this is the body of the routine */
+	/* fdomain_scsi_pointer(cur_cmd)->phase == in_other: this is the body of the routine */
 	status = inb(fd->base + REG_BSTAT);
 
 	if (status & BSTAT_REQ) {
 		switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) {
 		case BSTAT_CMD:	/* COMMAND OUT */
-			outb(cmd->cmnd[cmd->SCp.sent_command++],
+			outb(cmd->cmnd[scsi_pointer->sent_command++],
			     fd->base + REG_SCSI_DATA);
 			break;
 		case 0:	/* DATA OUT -- tmc18c50/tmc18c30 only */
-			if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
-				cmd->SCp.have_data_in = -1;
+			if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+				scsi_pointer->have_data_in = -1;
 				outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
				     PARITY_MASK, fd->base + REG_ACTL);
 			}
 			break;
 		case BSTAT_IO:	/* DATA IN -- tmc18c50/tmc18c30 only */
-			if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
-				cmd->SCp.have_data_in = 1;
+			if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+				scsi_pointer->have_data_in = 1;
 				outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
				     fd->base + REG_ACTL);
 			}
 			break;
 		case BSTAT_CMD | BSTAT_IO:	/* STATUS IN */
-			cmd->SCp.Status = inb(fd->base + REG_SCSI_DATA);
+			scsi_pointer->Status = inb(fd->base + REG_SCSI_DATA);
 			break;
 		case BSTAT_MSG | BSTAT_CMD:	/* MESSAGE OUT */
 			outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
 			break;
 		case BSTAT_MSG | BSTAT_CMD | BSTAT_IO:	/* MESSAGE IN */
-			cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
-			if (cmd->SCp.Message == COMMAND_COMPLETE)
+			scsi_pointer->Message = inb(fd->base + REG_SCSI_DATA);
+			if (scsi_pointer->Message == COMMAND_COMPLETE)
 				++done;
 			break;
 		}
 	}
 
-	if (fd->chip == tmc1800 && !cmd->SCp.have_data_in &&
-	    cmd->SCp.sent_command >= cmd->cmd_len) {
+	if (fd->chip == tmc1800 && !scsi_pointer->have_data_in &&
+	    scsi_pointer->sent_command >= cmd->cmd_len) {
 		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-			cmd->SCp.have_data_in = -1;
+			scsi_pointer->have_data_in = -1;
 			outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
			     PARITY_MASK, fd->base + REG_ACTL);
 		} else {
-			cmd->SCp.have_data_in = 1;
+			scsi_pointer->have_data_in = 1;
 			outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
			     fd->base + REG_ACTL);
 		}
 	}
 
-	if (cmd->SCp.have_data_in == -1) /* DATA OUT */
+	if (scsi_pointer->have_data_in == -1) /* DATA OUT */
 		fdomain_write_data(cmd);
 
-	if (cmd->SCp.have_data_in == 1) /* DATA IN */
+	if (scsi_pointer->have_data_in == 1) /* DATA IN */
 		fdomain_read_data(cmd);
 
 	if (done) {
-		set_status_byte(cmd, cmd->SCp.Status);
+		set_status_byte(cmd, scsi_pointer->Status);
 		set_host_byte(cmd, DID_OK);
-		scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+		scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
 		fdomain_finish_cmd(fd);
 	} else {
-		if (cmd->SCp.phase & disconnect) {
+		if (scsi_pointer->phase & disconnect) {
 			outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
			     fd->base + REG_ICTL);
 			outb(0, fd->base + REG_BCTL);
@@ -398,14 +404,15 @@ static irqreturn_t fdomain_irq(int irq, void *dev_id)
 
 static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 {
+	struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
 	struct fdomain *fd = shost_priv(cmd->device->host);
 	unsigned long flags;
 
-	cmd->SCp.Status		= 0;
-	cmd->SCp.Message	= 0;
-	cmd->SCp.have_data_in	= 0;
-	cmd->SCp.sent_command	= 0;
-	cmd->SCp.phase		= in_arbitration;
+	scsi_pointer->Status		= 0;
+	scsi_pointer->Message		= 0;
+	scsi_pointer->have_data_in	= 0;
+	scsi_pointer->sent_command	= 0;
+	scsi_pointer->phase		= in_arbitration;
 	scsi_set_resid(cmd, scsi_bufflen(cmd));
 
 	spin_lock_irqsave(sh->host_lock, flags);
@@ -440,7 +447,7 @@ static int fdomain_abort(struct scsi_cmnd *cmd)
 	spin_lock_irqsave(sh->host_lock, flags);
 
 	fdomain_make_bus_idle(fd);
-	fd->cur_cmd->SCp.phase |= aborted;
+	fdomain_scsi_pointer(fd->cur_cmd)->phase |= aborted;
 
 	/* Aborts are not done well. . . */
 	set_host_byte(fd->cur_cmd, DID_ABORT);
@@ -501,6 +508,7 @@ static struct scsi_host_template fdomain_template = {
 	.this_id		= 7,
 	.sg_tablesize		= 64,
 	.dma_boundary		= PAGE_SIZE - 1,
+	.cmd_size		= sizeof(struct scsi_pointer),
 };
 
 struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
@@ -89,15 +89,28 @@
 #define FNIC_DEV_RST_ABTS_PENDING       BIT(21)
 
 /*
- * Usage of the scsi_cmnd scratchpad.
+ * fnic private data per SCSI command.
  * These fields are locked by the hashed io_req_lock.
 */
-#define CMD_SP(Cmnd)		((Cmnd)->SCp.ptr)
-#define CMD_STATE(Cmnd)		((Cmnd)->SCp.phase)
-#define CMD_ABTS_STATUS(Cmnd)	((Cmnd)->SCp.Message)
-#define CMD_LR_STATUS(Cmnd)	((Cmnd)->SCp.have_data_in)
-#define CMD_TAG(Cmnd)           ((Cmnd)->SCp.sent_command)
-#define CMD_FLAGS(Cmnd)         ((Cmnd)->SCp.Status)
+struct fnic_cmd_priv {
+	struct fnic_io_req *io_req;
+	enum fnic_ioreq_state state;
+	u32 flags;
+	u16 abts_status;
+	u16 lr_status;
+};
+
+static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd)
+{
+	return scsi_cmd_priv(cmd);
+}
+
+static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
+{
+	struct fnic_cmd_priv *fcmd = fnic_priv(cmd);
+
+	return ((u64)fcmd->flags << 32) | fcmd->state;
+}
 
 #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
 
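
[Note: fnic_flags_and_state() above exists purely for the FNIC_TRACE call sites in fnic_scsi.c that follow: it packs the 32-bit flags word and the ioreq state into one u64, replacing the open-coded "((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)" expressions (one of which, visible in the queuecommand trace hunk below, shifted the wrong way with >> 32). Unpacking is the mirror image; demo_unpack is a hypothetical helper:

	static inline void demo_unpack(u64 v, u32 *flags, u32 *state)
	{
		*flags = v >> 32;
		*state = v & 0xffffffffu;
	}
]
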
|
@ -124,6 +124,7 @@ static struct scsi_host_template fnic_host_template = {
|
|||
.max_sectors = 0xffff,
|
||||
.shost_groups = fnic_host_groups,
|
||||
.track_queue_depth = 1,
|
||||
.cmd_size = sizeof(struct fnic_cmd_priv),
|
||||
};
|
||||
|
||||
static void
|
||||
|
|
|
@ -497,8 +497,8 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
|
|||
* caller disabling them.
|
||||
*/
|
||||
spin_unlock(lp->host->host_lock);
|
||||
CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
|
||||
CMD_FLAGS(sc) = FNIC_NO_FLAGS;
|
||||
fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
|
||||
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
|
||||
|
||||
/* Get a new io_req for this SCSI IO */
|
||||
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
|
||||
|
@ -513,7 +513,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
|
|||
sg_count = scsi_dma_map(sc);
|
||||
if (sg_count < 0) {
|
||||
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
|
||||
tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
|
||||
tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
|
||||
mempool_free(io_req, fnic->io_req_pool);
|
||||
goto out;
|
||||
}
|
||||
|
@ -558,9 +558,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
|
|||
io_lock_acquired = 1;
|
||||
io_req->port_id = rport->port_id;
|
||||
io_req->start_time = jiffies;
|
||||
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
|
||||
CMD_SP(sc) = (char *)io_req;
|
||||
CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
|
||||
fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
|
||||
fnic_priv(sc)->io_req = io_req;
|
||||
fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
|
||||
|
||||
/* create copy wq desc and enqueue it */
|
||||
wq = &fnic->wq_copy[0];
|
||||
|
@ -571,11 +571,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
|
|||
* refetch the pointer under the lock.
|
||||
*/
|
||||
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
|
||||
tag, sc, 0, 0, 0,
|
||||
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
|
||||
io_req = (struct fnic_io_req *)CMD_SP(sc);
|
||||
CMD_SP(sc) = NULL;
|
||||
CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
|
||||
tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
|
||||
io_req = fnic_priv(sc)->io_req;
|
||||
fnic_priv(sc)->io_req = NULL;
|
||||
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
|
||||
spin_unlock_irqrestore(io_lock, flags);
|
||||
if (io_req) {
|
||||
fnic_release_ioreq_buf(fnic, io_req, sc);
|
||||
|
@ -594,7 +593,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
|
|||
atomic64_read(&fnic_stats->io_stats.active_ios));
|
||||
|
||||
/* REVISIT: Use per IO lock in the final code */
|
||||
CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
|
||||
fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
|
||||
}
|
||||
out:
|
||||
cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
|
||||
|
@ -603,8 +602,8 @@ out:
|
|||
sc->cmnd[5]);
|
||||
|
||||
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
|
||||
tag, sc, io_req, sg_count, cmd_trace,
|
||||
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
|
||||
tag, sc, io_req, sg_count, cmd_trace,
|
||||
fnic_flags_and_state(sc));
|
||||
|
||||
/* if only we issued IO, will we have the io lock */
|
||||
if (io_lock_acquired)
|
||||
|
@@ -867,11 +866,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,

 	io_lock = fnic_io_lock_hash(fnic, sc);
 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	WARN_ON_ONCE(!io_req);
 	if (!io_req) {
 		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
-		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
+		fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
 		spin_unlock_irqrestore(io_lock, flags);
 		shost_printk(KERN_ERR, fnic->lport->host,
 			  "icmnd_cmpl io_req is null - "
@@ -888,17 +887,17 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	 * if SCSI-ML has already issued abort on this command,
 	 * set completion of the IO. The abts path will clean it up
 	 */
-	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {

 		/*
 		 * set the FNIC_IO_DONE so that this doesn't get
 		 * flagged as 'out of order' if it was not aborted
 		 */
-		CMD_FLAGS(sc) |= FNIC_IO_DONE;
-		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+		fnic_priv(sc)->flags |= FNIC_IO_DONE;
+		fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
 		spin_unlock_irqrestore(io_lock, flags);
 		if(FCPIO_ABORTED == hdr_status)
-			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+			fnic_priv(sc)->flags |= FNIC_IO_ABORTED;

 		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 			"icmnd_cmpl abts pending "
@@ -912,7 +911,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	}

 	/* Mark the IO as complete */
-	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+	fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;

 	icmnd_cmpl = &desc->u.icmnd_cmpl;

@@ -983,8 +982,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	}

 	/* Break link with the SCSI command */
-	CMD_SP(sc) = NULL;
-	CMD_FLAGS(sc) |= FNIC_IO_DONE;
+	fnic_priv(sc)->io_req = NULL;
+	fnic_priv(sc)->flags |= FNIC_IO_DONE;

 	if (hdr_status != FCPIO_SUCCESS) {
 		atomic64_inc(&fnic_stats->io_stats.io_failures);
@@ -1005,8 +1004,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 		  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
 		  (u64)icmnd_cmpl->_resvd0[0] << 48 |
 		  jiffies_to_msecs(jiffies - start_time)),
-		  desc, cmd_trace,
-		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+		  desc, cmd_trace, fnic_flags_and_state(sc));

 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
 		fnic->lport->host_stats.fcp_input_requests++;
@@ -1094,12 +1092,12 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 	}
 	io_lock = fnic_io_lock_hash(fnic, sc);
 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	WARN_ON_ONCE(!io_req);
 	if (!io_req) {
 		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		spin_unlock_irqrestore(io_lock, flags);
-		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
 		shost_printk(KERN_ERR, fnic->lport->host,
 			  "itmf_cmpl io_req is null - "
 			  "hdr status = %s tag = 0x%x sc 0x%p\n",
@@ -1114,9 +1112,9 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"dev reset abts cmpl recd. id %x status %s\n",
 			id, fnic_fcpio_status_to_str(hdr_status));
-		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
-		CMD_ABTS_STATUS(sc) = hdr_status;
-		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+		fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
+		fnic_priv(sc)->abts_status = hdr_status;
+		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
 		if (io_req->abts_done)
 			complete(io_req->abts_done);
 		spin_unlock_irqrestore(io_lock, flags);
@@ -1126,7 +1124,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 		case FCPIO_SUCCESS:
 			break;
 		case FCPIO_TIMEOUT:
-			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
 				atomic64_inc(&abts_stats->abort_fw_timeouts);
 			else
 				atomic64_inc(
@@ -1138,34 +1136,34 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 				(int)(id & FNIC_TAG_MASK));
 			break;
 		case FCPIO_IO_NOT_FOUND:
-			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
 				atomic64_inc(&abts_stats->abort_io_not_found);
 			else
 				atomic64_inc(
 					&term_stats->terminate_io_not_found);
 			break;
 		default:
-			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
 				atomic64_inc(&abts_stats->abort_failures);
 			else
 				atomic64_inc(
 					&term_stats->terminate_failures);
 			break;
 		}
-		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
+		if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
 			/* This is a late completion. Ignore it */
 			spin_unlock_irqrestore(io_lock, flags);
 			return;
 		}

-		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
-		CMD_ABTS_STATUS(sc) = hdr_status;
+		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
+		fnic_priv(sc)->abts_status = hdr_status;

 		/* If the status is IO not found consider it as success */
 		if (hdr_status == FCPIO_IO_NOT_FOUND)
-			CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
+			fnic_priv(sc)->abts_status = FCPIO_SUCCESS;

-		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+		if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
 			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -1184,7 +1182,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 		} else {
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 				"abts cmpl, completing IO\n");
-			CMD_SP(sc) = NULL;
+			fnic_priv(sc)->io_req = NULL;
 			sc->result = (DID_ERROR << 16);

 			spin_unlock_irqrestore(io_lock, flags);
@@ -1201,8 +1199,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 				  (u64)sc->cmnd[2] << 24 |
 				  (u64)sc->cmnd[3] << 16 |
 				  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
-				  (((u64)CMD_FLAGS(sc) << 32) |
-				  CMD_STATE(sc)));
+				  fnic_flags_and_state(sc));
 			scsi_done(sc);
 			atomic64_dec(&fnic_stats->io_stats.active_ios);
 			if (atomic64_read(&fnic->io_cmpl_skip))
@@ -1212,15 +1209,14 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 		}
 	} else if (id & FNIC_TAG_DEV_RST) {
 		/* Completion of device reset */
-		CMD_LR_STATUS(sc) = hdr_status;
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+		fnic_priv(sc)->lr_status = hdr_status;
+		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
 			spin_unlock_irqrestore(io_lock, flags);
-			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+			fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
 			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
 				  sc->device->host->host_no, id, sc,
 				  jiffies_to_msecs(jiffies - start_time),
-				  desc, 0,
-				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+				  desc, 0, fnic_flags_and_state(sc));
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 				"Terminate pending "
 				"dev reset cmpl recd. id %d status %s\n",
@@ -1228,14 +1224,13 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 				fnic_fcpio_status_to_str(hdr_status));
 			return;
 		}
-		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+		if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
 			/* Need to wait for terminate completion */
 			spin_unlock_irqrestore(io_lock, flags);
 			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
 				  sc->device->host->host_no, id, sc,
 				  jiffies_to_msecs(jiffies - start_time),
-				  desc, 0,
-				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+				  desc, 0, fnic_flags_and_state(sc));
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 				"dev reset cmpl recd after time out. "
 				"id %d status %s\n",
@@ -1243,8 +1238,8 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 				fnic_fcpio_status_to_str(hdr_status));
 			return;
 		}
-		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
-		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
+		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "dev reset cmpl recd. id %d status %s\n",
 			      (int)(id & FNIC_TAG_MASK),
@@ -1256,7 +1251,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 	} else {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "Unexpected itmf io state %s tag %x\n",
-			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
+			     fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
 		spin_unlock_irqrestore(io_lock, flags);
 	}

@@ -1369,21 +1364,21 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
 	io_lock = fnic_io_lock_tag(fnic, tag);
 	spin_lock_irqsave(io_lock, flags);

-	io_req = (struct fnic_io_req *)CMD_SP(sc);
-	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
-	    !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+	io_req = fnic_priv(sc)->io_req;
+	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+	    !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
 		/*
 		 * We will be here only when FW completes reset
 		 * without sending completions for outstanding ios.
 		 */
-		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
 		if (io_req && io_req->dr_done)
 			complete(io_req->dr_done);
 		else if (io_req && io_req->abts_done)
 			complete(io_req->abts_done);
 		spin_unlock_irqrestore(io_lock, flags);
 		return true;
-	} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+	} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
 		spin_unlock_irqrestore(io_lock, flags);
 		return true;
 	}
@@ -1392,7 +1387,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
 		goto cleanup_scsi_cmd;
 	}

-	CMD_SP(sc) = NULL;
+	fnic_priv(sc)->io_req = NULL;

 	spin_unlock_irqrestore(io_lock, flags);

@@ -1416,7 +1411,7 @@ cleanup_scsi_cmd:
 		atomic64_inc(&fnic_stats->io_stats.io_completions);

 	/* Complete the command to SCSI */
-	if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+	if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
 			     tag, sc);
@@ -1428,7 +1423,7 @@ cleanup_scsi_cmd:
 		   (u64)sc->cmnd[2] << 24 |
 		   (u64)sc->cmnd[3] << 16 |
 		   (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
-		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+		  fnic_flags_and_state(sc));

 	scsi_done(sc);

@@ -1467,7 +1462,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 	spin_lock_irqsave(io_lock, flags);

 	/* Get the IO context which this desc refers to */
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;

 	/* fnic interrupts are turned off by now */

@@ -1476,7 +1471,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 		goto wq_copy_cleanup_scsi_cmd;
 	}

-	CMD_SP(sc) = NULL;
+	fnic_priv(sc)->io_req = NULL;

 	spin_unlock_irqrestore(io_lock, flags);

@@ -1495,7 +1490,7 @@ wq_copy_cleanup_scsi_cmd:
 		  0, ((u64)sc->cmnd[0] << 32 |
 		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
 		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
-		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+		  fnic_flags_and_state(sc));

 	scsi_done(sc);
 }
@@ -1570,15 +1565,15 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
 	io_lock = fnic_io_lock_tag(fnic, abt_tag);
 	spin_lock_irqsave(io_lock, flags);

-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;

 	if (!io_req || io_req->port_id != iter_data->port_id) {
 		spin_unlock_irqrestore(io_lock, flags);
 		return true;
 	}

-	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
-	    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+	    !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
 			sc);
@@ -1590,7 +1585,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
 	 * Found IO that is still pending with firmware and
 	 * belongs to rport that went away
 	 */
-	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
 		spin_unlock_irqrestore(io_lock, flags);
 		return true;
 	}
@@ -1598,20 +1593,20 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
 		shost_printk(KERN_ERR, fnic->lport->host,
 			"fnic_rport_exch_reset: io_req->abts_done is set "
 			"state is %s\n",
-			fnic_ioreq_state_to_str(CMD_STATE(sc)));
+			fnic_ioreq_state_to_str(fnic_priv(sc)->state));
 	}

-	if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+	if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			  "rport_exch_reset "
 			  "IO not yet issued %p tag 0x%x flags "
 			  "%x state %d\n",
-			  sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc));
+			  sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
 	}
-	old_ioreq_state = CMD_STATE(sc);
-	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
-	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
-	if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+	old_ioreq_state = fnic_priv(sc)->state;
+	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
 		atomic64_inc(&reset_stats->device_reset_terminates);
 		abt_tag |= FNIC_TAG_DEV_RST;
 	}
@@ -1637,15 +1632,15 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
 		 * lun reset
 		 */
 		spin_lock_irqsave(io_lock, flags);
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
-			CMD_STATE(sc) = old_ioreq_state;
+		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+			fnic_priv(sc)->state = old_ioreq_state;
 		spin_unlock_irqrestore(io_lock, flags);
 	} else {
 		spin_lock_irqsave(io_lock, flags);
-		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
-			CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
 		else
-			CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+			fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
 		spin_unlock_irqrestore(io_lock, flags);
 		atomic64_inc(&term_stats->terminates);
 		iter_data->term_cnt++;
@@ -1753,9 +1748,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	FNIC_SCSI_DBG(KERN_DEBUG,
 		fnic->lport->host,
 		"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
-		rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+		rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);

-	CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+	fnic_priv(sc)->flags = FNIC_NO_FLAGS;

 	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
 		ret = FAILED;
@@ -1772,11 +1767,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	 * happened, the completion wont actually complete the command
 	 * and it will be considered as an aborted command
 	 *
-	 * The CMD_SP will not be cleared except while holding io_req_lock.
+	 * .io_req will not be cleared except while holding io_req_lock.
 	 */
 	io_lock = fnic_io_lock_hash(fnic, sc);
 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	if (!io_req) {
 		spin_unlock_irqrestore(io_lock, flags);
 		goto fnic_abort_cmd_end;
@@ -1784,7 +1779,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)

 	io_req->abts_done = &tm_done;

-	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
 		spin_unlock_irqrestore(io_lock, flags);
 		goto wait_pending;
 	}
@@ -1813,9 +1808,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	 * the completion wont be done till mid-layer, since abort
 	 * has already started.
 	 */
-	old_ioreq_state = CMD_STATE(sc);
-	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
-	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+	old_ioreq_state = fnic_priv(sc)->state;
+	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;

 	spin_unlock_irqrestore(io_lock, flags);

@@ -1837,9 +1832,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
 				    io_req)) {
 		spin_lock_irqsave(io_lock, flags);
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
-			CMD_STATE(sc) = old_ioreq_state;
-		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+			fnic_priv(sc)->state = old_ioreq_state;
+		io_req = fnic_priv(sc)->io_req;
 		if (io_req)
 			io_req->abts_done = NULL;
 		spin_unlock_irqrestore(io_lock, flags);
@@ -1847,10 +1842,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 		goto fnic_abort_cmd_end;
 	}
 	if (task_req == FCPIO_ITMF_ABT_TASK) {
-		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+		fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
 		atomic64_inc(&fnic_stats->abts_stats.aborts);
 	} else {
-		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+		fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
 		atomic64_inc(&fnic_stats->term_stats.terminates);
 	}

@@ -1868,32 +1863,32 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	/* Check the abort status */
 	spin_lock_irqsave(io_lock, flags);

-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	if (!io_req) {
 		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
 		spin_unlock_irqrestore(io_lock, flags);
-		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
 	io_req->abts_done = NULL;

 	/* fw did not complete abort, timed out */
-	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
 		spin_unlock_irqrestore(io_lock, flags);
 		if (task_req == FCPIO_ITMF_ABT_TASK) {
 			atomic64_inc(&abts_stats->abort_drv_timeouts);
 		} else {
 			atomic64_inc(&term_stats->terminate_drv_timeouts);
 		}
-		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
+		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}

 	/* IO out of order */

-	if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+	if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
 		spin_unlock_irqrestore(io_lock, flags);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"Issuing Host reset due to out of order IO\n");
@@ -1902,7 +1897,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 		goto fnic_abort_cmd_end;
 	}

-	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;

 	start_time = io_req->start_time;
 	/*
@@ -1910,9 +1905,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	 * free the io_req if successful. If abort fails,
 	 * Device reset will clean the I/O.
 	 */
-	if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
-		CMD_SP(sc) = NULL;
-	else {
+	if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
+		fnic_priv(sc)->io_req = NULL;
+	} else {
 		ret = FAILED;
 		spin_unlock_irqrestore(io_lock, flags);
 		goto fnic_abort_cmd_end;
@@ -1938,7 +1933,7 @@ fnic_abort_cmd_end:
 		  0, ((u64)sc->cmnd[0] << 32 |
 		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
 		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
-		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+		  fnic_flags_and_state(sc));

 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		"Returning from abort cmd type %x %s\n", task_req,
@@ -2029,7 +2024,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,

 	io_lock = fnic_io_lock_tag(fnic, abt_tag);
 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	if (!io_req) {
 		spin_unlock_irqrestore(io_lock, flags);
 		return true;
@@ -2041,14 +2036,14 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
 	 */
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "Found IO in %s on lun\n",
-		      fnic_ioreq_state_to_str(CMD_STATE(sc)));
+		      fnic_ioreq_state_to_str(fnic_priv(sc)->state));

-	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
 		spin_unlock_irqrestore(io_lock, flags);
 		return true;
 	}
-	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
-	    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+	    (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
 		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 			      "%s dev rst not pending sc 0x%p\n", __func__,
 			      sc);
@@ -2059,8 +2054,8 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
 	if (io_req->abts_done)
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "%s: io_req->abts_done is set state is %s\n",
-			     __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
-	old_ioreq_state = CMD_STATE(sc);
+			     __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+	old_ioreq_state = fnic_priv(sc)->state;
 	/*
 	 * Any pending IO issued prior to reset is expected to be
 	 * in abts pending state, if not we need to set
@@ -2068,17 +2063,17 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
 	 * When IO is completed, the IO will be handed over and
 	 * handled in this function.
 	 */
-	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;

 	BUG_ON(io_req->abts_done);

-	if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
 		abt_tag |= FNIC_TAG_DEV_RST;
 		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 			      "%s: dev rst sc 0x%p\n", __func__, sc);
 	}

-	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
 	io_req->abts_done = &tm_done;
 	spin_unlock_irqrestore(io_lock, flags);

@@ -2089,48 +2084,48 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
 				    FCPIO_ITMF_ABT_TASK_TERM,
 				    fc_lun.scsi_lun, io_req)) {
 		spin_lock_irqsave(io_lock, flags);
-		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		io_req = fnic_priv(sc)->io_req;
 		if (io_req)
 			io_req->abts_done = NULL;
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
-			CMD_STATE(sc) = old_ioreq_state;
+		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+			fnic_priv(sc)->state = old_ioreq_state;
 		spin_unlock_irqrestore(io_lock, flags);
 		iter_data->ret = FAILED;
 		return false;
 	} else {
 		spin_lock_irqsave(io_lock, flags);
-		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
-			CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
 		spin_unlock_irqrestore(io_lock, flags);
 	}
-	CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+	fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;

 	wait_for_completion_timeout(&tm_done, msecs_to_jiffies
 				    (fnic->config.ed_tov));

 	/* Recheck cmd state to check if it is now aborted */
 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	if (!io_req) {
 		spin_unlock_irqrestore(io_lock, flags);
-		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
 		return true;
 	}

 	io_req->abts_done = NULL;

 	/* if abort is still pending with fw, fail */
-	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
 		spin_unlock_irqrestore(io_lock, flags);
-		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
 		iter_data->ret = FAILED;
 		return false;
 	}
-	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;

 	/* original sc used for lr is handled by dev reset code */
 	if (sc != iter_data->lr_sc)
-		CMD_SP(sc) = NULL;
+		fnic_priv(sc)->io_req = NULL;
 	spin_unlock_irqrestore(io_lock, flags);

 	/* original sc used for lr is handled by dev reset code */
@@ -2271,7 +2266,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		goto fnic_device_reset_end;
 	}

-	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+	fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
 	/* Allocate tag if not present */

 	if (unlikely(tag < 0)) {
@@ -2287,7 +2282,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	}
 	io_lock = fnic_io_lock_hash(fnic, sc);
 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;

 	/*
 	 * If there is a io_req attached to this command, then use it,
@@ -2301,11 +2296,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		}
 		memset(io_req, 0, sizeof(*io_req));
 		io_req->port_id = rport->port_id;
-		CMD_SP(sc) = (char *)io_req;
+		fnic_priv(sc)->io_req = io_req;
 	}
 	io_req->dr_done = &tm_done;
-	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
-	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
+	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
+	fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
 	spin_unlock_irqrestore(io_lock, flags);

 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
@@ -2316,13 +2311,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	 */
 	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
 		spin_lock_irqsave(io_lock, flags);
-		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		io_req = fnic_priv(sc)->io_req;
 		if (io_req)
 			io_req->dr_done = NULL;
 		goto fnic_device_reset_clean;
 	}
 	spin_lock_irqsave(io_lock, flags);
-	CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+	fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
 	spin_unlock_irqrestore(io_lock, flags);

 	/*
@@ -2333,7 +2328,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	if (!io_req) {
 		spin_unlock_irqrestore(io_lock, flags);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2342,7 +2337,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	}
 	io_req->dr_done = NULL;

-	status = CMD_LR_STATUS(sc);
+	status = fnic_priv(sc)->lr_status;

 	/*
 	 * If lun reset not completed, bail out with failed. io_req
@@ -2352,7 +2347,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		atomic64_inc(&reset_stats->device_reset_timeouts);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Device reset timed out\n");
-		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+		fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
 		spin_unlock_irqrestore(io_lock, flags);
 		int_to_scsilun(sc->device->lun, &fc_lun);
 		/*
@@ -2361,7 +2356,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		 */
 		while (1) {
 			spin_lock_irqsave(io_lock, flags);
-			if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+			if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
 				spin_unlock_irqrestore(io_lock, flags);
 				break;
 			}
@@ -2374,8 +2369,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 				msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
 		} else {
 			spin_lock_irqsave(io_lock, flags);
-			CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
-			CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
+			fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
 			io_req->abts_done = &tm_done;
 			spin_unlock_irqrestore(io_lock, flags);
 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2386,13 +2381,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		}
 		while (1) {
 			spin_lock_irqsave(io_lock, flags);
-			if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+			if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
 				spin_unlock_irqrestore(io_lock, flags);
 				wait_for_completion_timeout(&tm_done,
 				msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
 				break;
 			} else {
-				io_req = (struct fnic_io_req *)CMD_SP(sc);
+				io_req = fnic_priv(sc)->io_req;
 				io_req->abts_done = NULL;
 				goto fnic_device_reset_clean;
 			}
@@ -2407,7 +2402,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 			FNIC_SCSI_DBG(KERN_DEBUG,
 				      fnic->lport->host,
 				      "Device reset completed - failed\n");
-			io_req = (struct fnic_io_req *)CMD_SP(sc);
+			io_req = fnic_priv(sc)->io_req;
 			goto fnic_device_reset_clean;
 		}

@@ -2420,7 +2415,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	 */
 	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
 		spin_lock_irqsave(io_lock, flags);
-		io_req = (struct fnic_io_req *)CMD_SP(sc);
+		io_req = fnic_priv(sc)->io_req;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "Device reset failed"
 			      " since could not abort all IOs\n");
@@ -2429,14 +2424,14 @@ int fnic_device_reset(struct scsi_cmnd *sc)

 	/* Clean lun reset command */
 	spin_lock_irqsave(io_lock, flags);
-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	if (io_req)
 		/* Completed, and successful */
 		ret = SUCCESS;

 fnic_device_reset_clean:
 	if (io_req)
-		CMD_SP(sc) = NULL;
+		fnic_priv(sc)->io_req = NULL;

 	spin_unlock_irqrestore(io_lock, flags);

@@ -2452,7 +2447,7 @@ fnic_device_reset_end:
 		  0, ((u64)sc->cmnd[0] << 32 |
 		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
 		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
-		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+		  fnic_flags_and_state(sc));

 	/* free tag if it is allocated */
 	if (unlikely(tag_gen_flag))
@@ -2697,7 +2692,7 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
 	io_lock = fnic_io_lock_hash(fnic, sc);
 	spin_lock_irqsave(io_lock, flags);

-	io_req = (struct fnic_io_req *)CMD_SP(sc);
+	io_req = fnic_priv(sc)->io_req;
 	if (!io_req) {
 		spin_unlock_irqrestore(io_lock, flags);
 		return true;
@@ -2709,8 +2704,8 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
 	 */
 	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
 		      "Found IO in %s on lun\n",
-		      fnic_ioreq_state_to_str(CMD_STATE(sc)));
-	cmd_state = CMD_STATE(sc);
+		      fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+	cmd_state = fnic_priv(sc)->state;
 	spin_unlock_irqrestore(io_lock, flags);
 	if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
 		iter_data->ret = 1;
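The fnic hunks above lean on two helpers that this series introduces in fnic.h in place of the old CMD_*() wrappers around the SCSI pointer. A minimal sketch of what they look like, reconstructed from the replacements visible in the hunks (field order and integer widths are illustrative assumptions, not quoted from the header):

#include <scsi/scsi_cmnd.h>	/* scsi_cmd_priv() */

/* Per-command private data replacing the scsi_pointer-based CMD_*() macros.
 * Storage is reserved by setting cmd_size in the SCSI host template;
 * fnic_io_req and fnic_ioreq_state are the driver's existing types. */
struct fnic_cmd_priv {
	struct fnic_io_req *io_req;	/* was CMD_SP(sc) */
	enum fnic_ioreq_state state;	/* was CMD_STATE(sc) */
	u32 flags;			/* was CMD_FLAGS(sc) */
	u16 abts_status;		/* was CMD_ABTS_STATUS(sc) */
	u16 lr_status;			/* was CMD_LR_STATUS(sc) */
};

static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *sc)
{
	return scsi_cmd_priv(sc);
}

/* Packs flags and state into one u64 for the FNIC_TRACE() call sites,
 * replacing the open-coded (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)). */
static inline u64 fnic_flags_and_state(struct scsi_cmnd *sc)
{
	struct fnic_cmd_priv *fcmd = fnic_priv(sc);

	return ((u64)fcmd->flags << 32) | fcmd->state;
}

Factoring the trace argument into fnic_flags_and_state() also quietly fixes the one call site above that shifted right (>>) instead of left, since every caller now goes through the same packing.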
drivers/scsi/g_NCR5380.c
@@ -663,7 +663,7 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
 static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
 					struct scsi_cmnd *cmd)
 {
-	int transfersize = cmd->SCp.this_residual;
+	int transfersize = NCR5380_to_ncmd(cmd)->this_residual;

 	if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
 		return 0;
@@ -675,7 +675,7 @@ static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
 	/* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */
 	if (hostdata->board == BOARD_DTC3181E &&
 	    cmd->sc_data_direction == DMA_TO_DEVICE)
-		transfersize = min(cmd->SCp.this_residual, 512);
+		transfersize = min(transfersize, 512);

 	return min(transfersize, DMA_MAX_SIZE);
 }
@@ -702,7 +702,7 @@ static struct scsi_host_template driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
 	.dma_boundary		= PAGE_SIZE - 1,
-	.cmd_size		= NCR5380_CMD_SIZE,
+	.cmd_size		= sizeof(struct NCR5380_cmd),
 	.max_sectors		= 128,
 };
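For NCR5380-based drivers the per-command bookkeeping moves into struct NCR5380_cmd, with room reserved through cmd_size. The accessor assumed by the hunk above is presumably along these lines (a sketch; the scsi_cmd_priv()-based body is an assumption matching the other conversions in this series):

#include <scsi/scsi_cmnd.h>	/* scsi_cmd_priv() */

/* Sketch: map a scsi_cmnd to the NCR5380 core's per-command data once
 * .cmd_size = sizeof(struct NCR5380_cmd) reserves space for it. */
static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

Note that the min() change in the DTC3181E branch is behavior-preserving: transfersize was just initialized from this_residual, so clamping transfersize clamps the same value without touching the SCSI pointer again.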
drivers/scsi/gvp11.c
@@ -12,7 +12,11 @@
 #include <asm/amigaints.h>
 #include <asm/amigahw.h>

-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
 #include "wd33c93.h"
 #include "gvp11.h"

@@ -49,18 +53,19 @@ void gvp11_setup(char *str, int *ints)

 static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
+	struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
 	struct Scsi_Host *instance = cmd->device->host;
 	struct gvp11_hostdata *hdata = shost_priv(instance);
 	struct WD33C93_hostdata *wh = &hdata->wh;
 	struct gvp11_scsiregs *regs = hdata->regs;
 	unsigned short cntr = GVP11_DMAC_INT_ENABLE;
-	unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+	unsigned long addr = virt_to_bus(scsi_pointer->ptr);
 	int bank_mask;
 	static int scsi_alloc_out_of_range = 0;

 	/* use bounce buffer if the physical address is bad */
 	if (addr & wh->dma_xfer_mask) {
-		wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+		wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;

 		if (!scsi_alloc_out_of_range) {
 			wh->dma_bounce_buffer =
@@ -109,8 +114,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)

 		if (!dir_in) {
 			/* copy to bounce buffer for a write */
-			memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
-			       cmd->SCp.this_residual);
+			memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+			       scsi_pointer->this_residual);
 		}
 	}

@@ -126,10 +131,10 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)

 	if (dir_in) {
 		/* invalidate any cache */
-		cache_clear(addr, cmd->SCp.this_residual);
+		cache_clear(addr, scsi_pointer->this_residual);
 	} else {
 		/* push any dirty cache */
-		cache_push(addr, cmd->SCp.this_residual);
+		cache_push(addr, scsi_pointer->this_residual);
 	}

 	bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
@@ -146,6 +151,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 		     int status)
 {
+	struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
 	struct gvp11_hostdata *hdata = shost_priv(instance);
 	struct WD33C93_hostdata *wh = &hdata->wh;
 	struct gvp11_scsiregs *regs = hdata->regs;
@@ -158,8 +164,8 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	/* copy from a bounce buffer, if necessary */
 	if (status && wh->dma_bounce_buffer) {
 		if (wh->dma_dir && SCpnt)
-			memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
-			       SCpnt->SCp.this_residual);
+			memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+			       scsi_pointer->this_residual);

 		if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
 			kfree(wh->dma_bounce_buffer);
@@ -185,6 +191,7 @@ static struct scsi_host_template gvp11_scsi_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= CMD_PER_LUN,
 	.dma_boundary		= PAGE_SIZE - 1,
+	.cmd_size		= sizeof(struct scsi_pointer),
 };

 static int check_wd33c93(struct gvp11_scsiregs *regs)
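gvp11 is a wd33c93 client, and for these drivers the scsi_pointer itself survives; only its storage moves. Setting .cmd_size = sizeof(struct scsi_pointer) reserves per-command space, and a helper hands it back instead of dereferencing scsi_cmnd::SCp. A sketch of the helper assumed by the hunks above (reconstructed, not quoted from wd33c93.h):

#include <scsi/scsi_cmnd.h>	/* scsi_cmd_priv(), struct scsi_pointer */

/* Sketch: wd33c93 clients keep using struct scsi_pointer, but it now lives
 * in the midlayer-allocated private area rather than embedded in scsi_cmnd. */
static inline struct scsi_pointer *WD33C93_scsi_pointer(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

This is why dma_setup() and dma_stop() each gain one local variable and otherwise change only mechanically: every cmd->SCp.x becomes scsi_pointer->x.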