Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6

commit 35d91f75c2
@@ -60,6 +60,8 @@ scsi.txt
        - short blurb on using SCSI support as a module.
scsi_mid_low_api.txt
        - info on API between SCSI layer and low level drivers
scsi_eh.txt
        - info on SCSI midlayer error handling infrastructure
st.txt
        - info on scsi tape driver
sym53c500_cs.txt
@@ -0,0 +1,479 @@

                        SCSI EH
        ======================================

This document describes the SCSI midlayer error handling
infrastructure.  Please refer to Documentation/scsi/scsi_mid_low_api.txt
for more information regarding the SCSI midlayer.


TABLE OF CONTENTS

[1] How SCSI commands travel through the midlayer and to EH
    [1-1] struct scsi_cmnd
    [1-2] How do scmd's get completed?
        [1-2-1] Completing a scmd w/ scsi_done
        [1-2-2] Completing a scmd w/ timeout
    [1-3] How EH takes over
[2] How SCSI EH works
    [2-1] EH through fine-grained callbacks
        [2-1-1] Overview
        [2-1-2] Flow of scmds through EH
        [2-1-3] Flow of control
    [2-2] EH through hostt->eh_strategy_handler()
        [2-2-1] Pre hostt->eh_strategy_handler() SCSI midlayer conditions
        [2-2-2] Post hostt->eh_strategy_handler() SCSI midlayer conditions
        [2-2-3] Things to consider


[1] How SCSI commands travel through the midlayer and to EH

[1-1] struct scsi_cmnd

 Each SCSI command is represented by a struct scsi_cmnd (== scmd).  A
scmd has two list_heads for linking itself into lists: scmd->list and
scmd->eh_entry.  The former is used for the free list or the per-device
allocated scmd list and is not of much interest to this EH discussion.
The latter is used for completion and EH lists and, unless otherwise
stated, scmds are always linked using scmd->eh_entry in this
discussion.
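
 For reference, a trimmed-down sketch of the two linkages (this is not
the full struct scsi_cmnd definition; only the fields relevant to this
text are shown):

    struct scsi_cmnd {
            /* ... */
            struct list_head list;      /* free list / per-device allocated list */
            struct list_head eh_entry;  /* completion and EH lists */
            /* ... */
    };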

[1-2] How do scmd's get completed?

 Once an LLDD gets hold of a scmd, it is either completed by the LLDD
calling the scsi_done callback passed from the midlayer when invoking
hostt->queuecommand(), or it is timed out by the SCSI midlayer.


[1-2-1] Completing a scmd w/ scsi_done

 For all non-EH commands, scsi_done() is the completion callback.  It
does the following.

 1. Deletes the timeout timer.  If this fails, it means that the
    timeout timer has expired and is going to finish the command.
    Just return.

 2. Links the scmd to the per-cpu scsi_done_q using scmd->eh_entry.

 3. Raises SCSI_SOFTIRQ.

 The SCSI_SOFTIRQ handler, scsi_softirq, calls
scsi_decide_disposition(), which looks at the scmd->result value and
the sense data to determine what to do with the command.

 - SUCCESS
        scsi_finish_command() is invoked for the command.  The
        function does some maintenance chores and notifies completion
        by calling the scmd->done() callback, which, for fs requests,
        would be the HLD completion callback - sd:sd_rw_intr,
        sr:rw_intr, st:st_intr.

 - NEEDS_RETRY
 - ADD_TO_MLQUEUE
        The scmd is requeued to the blk queue.

 - otherwise
        scsi_eh_scmd_add(scmd, 0) is invoked for the command.  See
        [1-3] for details of this function.
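
 In code form, the dispatch described above amounts roughly to the
following (a simplified sketch, not the literal kernel source; return
values and the distinction between the two retry cases are glossed
over):

    switch (scsi_decide_disposition(scmd)) {
    case SUCCESS:
            scsi_finish_command(scmd);      /* completion travels upwards */
            break;
    case NEEDS_RETRY:
    case ADD_TO_MLQUEUE:
            /* requeue to the blk queue */
            scsi_queue_insert(scmd, SCSI_MLQUEUE_DEVICE_BUSY);
            break;
    default:
            scsi_eh_scmd_add(scmd, 0);      /* hand the scmd over to EH, see [1-3] */
            break;
    }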

[1-2-2] Completing a scmd w/ timeout

 The timeout handler is scsi_times_out().  When a timeout occurs, this
function

 1. invokes the optional hostt->eh_timedout() callback.  The return
    value can be one of

    - EH_HANDLED
        This indicates that eh_timedout() dealt with the timeout.  The
        scmd is passed to __scsi_done() and thus linked into the
        per-cpu scsi_done_q.  Normal command completion described in
        [1-2-1] follows.

    - EH_RESET_TIMER
        This indicates that more time is required to finish the
        command.  The timer is restarted.  This action is counted as a
        retry and is allowed only scmd->allowed + 1(!) times.  Once the
        limit is reached, the action for EH_NOT_HANDLED is taken
        instead.

        *NOTE* This action is racy as the LLDD could finish the scmd
        after the timeout has expired but before it's added back.  In
        such cases, scsi_done() would think that a timeout has occurred
        and return without doing anything.  We lose the completion and
        the command times out again.

    - EH_NOT_HANDLED
        This is the same as when the eh_timedout() callback doesn't
        exist.  Step #2 is taken.

 2. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
    command.  See [1-3] for more information.
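
 To make the three return values concrete, here is a hypothetical LLDD
timeout callback (illustrative only - the driver name and the helpers
mydrv_find_cmd()/mydrv_cmd_in_progress() are invented; only the meaning
of the return values comes from the text above):

    static int mydrv_eh_timedout(struct scsi_cmnd *scmd)
    {
            struct mydrv_cmd *mcmd = mydrv_find_cmd(scmd);  /* hypothetical lookup */

            if (!mcmd)                          /* hardware already completed it */
                    return EH_HANDLED;
            if (mydrv_cmd_in_progress(mcmd))    /* still in flight, grant more time */
                    return EH_RESET_TIMER;
            return EH_NOT_HANDLED;              /* let the midlayer add the scmd to EH (step #2) */
    }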

[1-3] How EH takes over

 scmds enter EH via scsi_eh_scmd_add(), which does the following.

 1. Turns on scmd->eh_eflags as requested.  It's 0 for error
    completions and SCSI_EH_CANCEL_CMD for timeouts.

 2. Links scmd->eh_entry to shost->eh_cmd_q.

 3. Sets the SHOST_RECOVERY bit in shost->shost_state.

 4. Increments shost->host_failed.

 5. Wakes up the SCSI EH thread if shost->host_busy ==
    shost->host_failed.

 As can be seen above, once any scmd is added to shost->eh_cmd_q, the
SHOST_RECOVERY shost_state bit is turned on.  This prevents any new
scmd from being issued from the blk queue to the host; eventually, all
scmds on the host either complete normally, fail and get added to
eh_cmd_q, or time out and get added to shost->eh_cmd_q.

 If all scmds either complete or fail, the number of in-flight scmds
becomes equal to the number of failed scmds - i.e. shost->host_busy ==
shost->host_failed.  This wakes up the SCSI EH thread.  So, once woken
up, the SCSI EH thread can expect that all in-flight commands have
failed and are linked on shost->eh_cmd_q.
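
 Condensed into code, the bookkeeping and the wake-up test performed
under shost->host_lock look roughly like this (a sketch only;
scsi_host_set_state() and scsi_eh_wakeup() are assumed helpers for
steps 3 and 5 above):

    scmd->eh_eflags |= eh_flag;         /* 0 or SCSI_EH_CANCEL_CMD */
    list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
    scsi_host_set_state(shost, SHOST_RECOVERY);
    shost->host_failed++;
    scsi_eh_wakeup(shost);      /* wakes the EH thread iff host_busy == host_failed */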

 Note that this does not mean lower layers are quiescent.  If an LLDD
completed a scmd with error status, the LLDD and lower layers are
assumed to forget about the scmd at that point.  However, if a scmd
has timed out, unless hostt->eh_timedout() made lower layers forget
about the scmd, which currently no LLDD does, the command is still
active as far as lower layers are concerned, and completion could
occur at any time.  Of course, all such completions are ignored as the
timer has already expired.

 We'll talk later about how SCSI EH takes actions to abort - make the
LLDD forget about - timed out scmds.


[2] How SCSI EH works

 LLDD's can implement SCSI EH actions in one of the following two
ways.

 - Fine-grained EH callbacks
        An LLDD can implement fine-grained EH callbacks and let the
        SCSI midlayer drive error handling and call the appropriate
        callbacks.  This will be discussed further in [2-1].

 - eh_strategy_handler() callback
        This is one big callback which should perform the whole of
        error handling.  As such, it should do all the chores the SCSI
        midlayer performs during recovery.  This will be discussed in
        [2-2].

 Once recovery is complete, SCSI EH resumes normal operation by
calling scsi_restart_operations(), which

 1. Checks if door locking is needed and locks the door.

 2. Clears the SHOST_RECOVERY shost_state bit.

 3. Wakes up waiters on shost->host_wait.  This occurs if someone
    calls scsi_block_when_processing_errors() on the host.
    (*QUESTION* why is it needed?  All operations will be blocked
    anyway after it reaches the blk queue.)

 4. Kick-starts the request queues of all devices on the host.


[2-1] EH through fine-grained callbacks

[2-1-1] Overview

 If eh_strategy_handler() is not present, the SCSI midlayer takes
charge of driving error handling.  EH's goals are twofold - make the
LLDD, host and device forget about timed out scmds, and make them
ready for new commands.  A scmd is said to be recovered if the scmd is
forgotten by lower layers and lower layers are ready to process or
fail the scmd again.

 To achieve these goals, EH performs recovery actions with increasing
severity.  Some actions are performed by issuing SCSI commands and
others are performed by invoking one of the following fine-grained
hostt EH callbacks.  Callbacks may be omitted; an omitted callback is
considered to always fail.

    int (* eh_abort_handler)(struct scsi_cmnd *);
    int (* eh_device_reset_handler)(struct scsi_cmnd *);
    int (* eh_bus_reset_handler)(struct scsi_cmnd *);
    int (* eh_host_reset_handler)(struct scsi_cmnd *);

 Higher-severity actions are taken only when lower-severity actions
cannot recover some of the failed scmds.  Also, note that failure of
the highest-severity action means EH failure and results in the
offlining of all unrecovered devices.
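
 To show how these callbacks are wired up, here is a hypothetical host
template fragment (driver and function names are invented for
illustration; the four callback slots are the ones listed above):

    static struct scsi_host_template mydrv_template = {
            .module                     = THIS_MODULE,
            .name                       = "mydrv",
            .queuecommand               = mydrv_queuecommand,
            .eh_abort_handler           = mydrv_eh_abort,
            .eh_device_reset_handler    = mydrv_eh_device_reset,
            .eh_bus_reset_handler       = mydrv_eh_bus_reset,
            .eh_host_reset_handler      = mydrv_eh_host_reset,
            /* ... can_queue, this_id, sg_tablesize, etc. as usual ... */
    };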

 During recovery, the following rules are followed.

 - Recovery actions are performed on the failed scmds on the to-do
   list, eh_work_q.  If a recovery action succeeds for a scmd, the
   recovered scmd is removed from eh_work_q.

   Note that a single recovery action can recover multiple scmds.
   e.g. resetting a device recovers all failed scmds on that device.

 - Higher severity actions are taken iff eh_work_q is not empty after
   lower severity actions are complete.

 - EH reuses failed scmds to issue commands for recovery.  For
   timed-out scmds, SCSI EH ensures that the LLDD forgets about a scmd
   before reusing it for EH commands.

 When a scmd is recovered, the scmd is moved from eh_work_q to the
EH-local eh_done_q using scsi_eh_finish_cmd().  After all scmds are
recovered (eh_work_q is empty), scsi_eh_flush_done_q() is invoked to
either retry or error-finish (notify the upper layer of failure) the
recovered scmds.

 A scmd is retried iff its sdev is still online (not offlined during
EH), REQ_FAILFAST is not set and ++scmd->retries is less than
scmd->allowed.
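
 In code form, that retry decision is essentially the following test
(a condensed sketch of what scsi_eh_flush_done_q() does, not the
literal source):

    if (scsi_device_online(scmd->device) &&
        !blk_noretry_request(scmd->request) &&      /* REQ_FAILFAST not set */
        ++scmd->retries < scmd->allowed)
            scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);     /* retry */
    else
            scsi_finish_command(scmd);      /* error-finish, notify upper layer */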


[2-1-2] Flow of scmds through EH

 1. Error completion / time out
    ACTION: scsi_eh_scmd_add() is invoked for the scmd
        - set scmd->eh_eflags
        - add scmd to shost->eh_cmd_q
        - set SHOST_RECOVERY
        - shost->host_failed++
    LOCKING: shost->host_lock

 2. EH starts
    ACTION: move all scmds to EH's local eh_work_q.  shost->eh_cmd_q
            is cleared.
    LOCKING: shost->host_lock (not strictly necessary, just for
             consistency)

 3. scmd recovered
    ACTION: scsi_eh_finish_cmd() is invoked to EH-finish the scmd
        - shost->host_failed--
        - clear scmd->eh_eflags
        - scsi_setup_cmd_retry()
        - move from local eh_work_q to local eh_done_q
    LOCKING: none

 4. EH completes
    ACTION: scsi_eh_flush_done_q() retries scmds or notifies the upper
            layer of failure.
        - scmd is removed from eh_done_q and scmd->eh_entry is cleared
        - if retry is necessary, scmd is requeued using
          scsi_queue_insert()
        - otherwise, scsi_finish_command() is invoked for the scmd
    LOCKING: queue or finish function performs appropriate locking


[2-1-3] Flow of control

 EH through fine-grained callbacks starts from scsi_unjam_host().

 <<scsi_unjam_host>>

 1. Lock shost->host_lock, splice_init shost->eh_cmd_q into the local
    eh_work_q and unlock host_lock.  Note that shost->eh_cmd_q is
    cleared by this action.

 2. Invoke scsi_eh_get_sense().

    <<scsi_eh_get_sense>>

    This action is taken for each error-completed
    (!SCSI_EH_CANCEL_CMD) command without valid sense data.  Most SCSI
    transports/LLDDs automatically acquire sense data on command
    failures (autosense).  Autosense is recommended for performance
    reasons and because sense information could get out of sync
    between the occurrence of CHECK CONDITION and this action.

    Note that if autosense is not supported, scmd->sense_buffer
    contains invalid sense data when the scmd is error-completed with
    scsi_done().  scsi_decide_disposition() always returns FAILED in
    such cases, thus invoking SCSI EH.  When the scmd reaches here,
    sense data is acquired and scsi_decide_disposition() is called
    again.

    1. Invoke scsi_request_sense(), which issues a REQUEST_SENSE
       command.  If it fails, no action is taken.  Note that taking no
       action causes higher-severity recovery to be taken for the
       scmd.

    2. Invoke scsi_decide_disposition() on the scmd

       - SUCCESS
            scmd->retries is set to scmd->allowed, preventing
            scsi_eh_flush_done_q() from retrying the scmd, and
            scsi_eh_finish_cmd() is invoked.

       - NEEDS_RETRY
            scsi_eh_finish_cmd() is invoked.

       - otherwise
            No action.

 3. If !list_empty(&eh_work_q), invoke scsi_eh_abort_cmds().

    <<scsi_eh_abort_cmds>>

    This action is taken for each timed-out command.
    hostt->eh_abort_handler() is invoked for each scmd.  The handler
    returns SUCCESS if it has succeeded in making the LLDD and all
    related hardware forget about the scmd.

    If a timed-out scmd is successfully aborted and the sdev is either
    offline or ready, scsi_eh_finish_cmd() is invoked for the scmd.
    Otherwise, the scmd is left in eh_work_q for higher-severity
    actions.

    Note that both offline and ready status mean that the sdev is
    ready to process new scmds, where processing also implies
    immediate failing; thus, if a sdev is in one of the two states, no
    further recovery action is needed.

    Device readiness is tested using scsi_eh_tur(), which issues a
    TEST_UNIT_READY command.  Note that the scmd must have been
    aborted successfully before reusing it for TEST_UNIT_READY.
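
    As an illustration of this contract, a hypothetical abort handler
    might look like the following (driver internals are invented; the
    SUCCESS/FAILED return convention is the part that matters):

        static int mydrv_eh_abort(struct scsi_cmnd *scmd)
        {
                struct mydrv_cmd *mcmd = mydrv_find_cmd(scmd);  /* hypothetical */

                if (!mcmd)                      /* nothing in flight, nothing to forget */
                        return SUCCESS;
                if (mydrv_hw_abort(mcmd))       /* hardware dropped the command */
                        return SUCCESS;
                return FAILED;                  /* leave the scmd for harsher actions */
        }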

 4. If !list_empty(&eh_work_q), invoke scsi_eh_ready_devs().

    <<scsi_eh_ready_devs>>

    This function takes four increasingly severe measures to make
    failed sdevs ready for new commands.

    1. Invoke scsi_eh_stu()

       <<scsi_eh_stu>>

       For each sdev which has failed scmds with valid sense data of
       which scsi_check_sense()'s verdict is FAILED, a START_STOP_UNIT
       command is issued w/ start=1.  Note that as we explicitly
       choose error-completed scmds, it is known that lower layers
       have forgotten about the scmd and we can reuse it for STU.

       If STU succeeds and the sdev is either offline or ready, all
       failed scmds on the sdev are EH-finished with
       scsi_eh_finish_cmd().

       *NOTE* If hostt->eh_abort_handler() isn't implemented or has
       failed, we may still have timed out scmds at this point and STU
       doesn't make lower layers forget about those scmds.  Yet, this
       function EH-finishes all scmds on the sdev if STU succeeds,
       leaving lower layers in an inconsistent state.  It seems that
       the STU action should be taken only when a sdev has no timed
       out scmd.

    2. If !list_empty(&eh_work_q), invoke scsi_eh_bus_device_reset().

       <<scsi_eh_bus_device_reset>>

       This action is very similar to scsi_eh_stu() except that,
       instead of issuing STU, hostt->eh_device_reset_handler() is
       used.  Also, as we're not issuing SCSI commands and resetting
       clears all scmds on the sdev, there is no need to choose
       error-completed scmds.

    3. If !list_empty(&eh_work_q), invoke scsi_eh_bus_reset()

       <<scsi_eh_bus_reset>>

       hostt->eh_bus_reset_handler() is invoked for each channel with
       failed scmds.  If the bus reset succeeds, all failed scmds on
       all ready or offline sdevs on the channel are EH-finished.

    4. If !list_empty(&eh_work_q), invoke scsi_eh_host_reset()

       <<scsi_eh_host_reset>>

       This is the last resort.  hostt->eh_host_reset_handler() is
       invoked.  If the host reset succeeds, all failed scmds on all
       ready or offline sdevs on the host are EH-finished.

    5. If !list_empty(&eh_work_q), invoke scsi_eh_offline_sdevs()

       <<scsi_eh_offline_sdevs>>

       Take all sdevs which still have unrecovered scmds offline and
       EH-finish the scmds.

 5. Invoke scsi_eh_flush_done_q().

    <<scsi_eh_flush_done_q>>

    At this point all scmds are either recovered or given up and put
    on eh_done_q by scsi_eh_finish_cmd().  This function flushes
    eh_done_q by either retrying or notifying the upper layer of the
    failure of the scmds.


[2-2] EH through hostt->eh_strategy_handler()

 hostt->eh_strategy_handler() is invoked in place of scsi_unjam_host()
and it is responsible for the whole recovery process.  On completion,
the handler should have made lower layers forget about all failed
scmds, and the failed sdevs should be either ready for new commands or
offline.  Also, it should perform the SCSI EH maintenance chores to
maintain the integrity of the SCSI midlayer.  IOW, of the steps
described in [2-1-2], all steps except for #1 must be implemented by
eh_strategy_handler().


[2-2-1] Pre hostt->eh_strategy_handler() SCSI midlayer conditions

 The following conditions are true on entry to the handler.

 - Each failed scmd's eh_eflags field is set appropriately.

 - Each failed scmd is linked on shost->eh_cmd_q by scmd->eh_entry.

 - SHOST_RECOVERY is set.

 - shost->host_failed == shost->host_busy


[2-2-2] Post hostt->eh_strategy_handler() SCSI midlayer conditions

 The following conditions must be true on exit from the handler.

 - shost->host_failed is zero.

 - Each scmd's eh_eflags field is cleared.

 - Each scmd is in such a state that scsi_setup_cmd_retry() on the
   scmd doesn't make any difference.

 - shost->eh_cmd_q is cleared.

 - Each scmd->eh_entry is cleared.

 - Either scsi_queue_insert() or scsi_finish_command() is called on
   each scmd.  Note that the handler is free to use scmd->retries and
   ->allowed to limit the number of retries.
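
 Putting the pre- and post-conditions together, a skeleton strategy
handler would be structured roughly as follows.  This is a sketch
under the assumptions above, not a drop-in implementation:
mydrv_recover_scmd() stands in for whatever driver-specific abort /
reset work is needed, and the handler's signature and return
convention are assumed.

    /* sketch; would need <scsi/scsi_cmnd.h>, <scsi/scsi_host.h> etc. */
    static int mydrv_eh_strategy_handler(struct Scsi_Host *shost)
    {
            struct scsi_cmnd *scmd, *next;

            list_for_each_entry_safe(scmd, next, &shost->eh_cmd_q, eh_entry) {
                    mydrv_recover_scmd(scmd);       /* make lower layers forget it */

                    scmd->eh_eflags = 0;            /* post: eh_eflags cleared */
                    scsi_setup_cmd_retry(scmd);
                    list_del_init(&scmd->eh_entry); /* post: eh_cmd_q / eh_entry cleared */
                    shost->host_failed--;           /* post: reaches zero */

                    if (scsi_device_online(scmd->device) &&
                        ++scmd->retries < scmd->allowed)
                            scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
                    else
                            scsi_finish_command(scmd);
            }
            return SUCCESS;     /* assumed return convention */
    }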


[2-2-3] Things to consider

 - Know that timed out scmds are still active on lower layers.  Make
   lower layers forget about them before doing anything else with
   those scmds.

 - For consistency, when accessing/modifying shost data structures,
   grab shost->host_lock.

 - On completion, each failed sdev must have forgotten about all
   active scmds.

 - On completion, each failed sdev must be ready for new commands or
   offline.


--
Tejun Heo
htejun@gmail.com
11th September 2005

@ -123,6 +123,7 @@ static int verify_command(struct file *file, unsigned char *cmd)
|
|||
safe_for_read(READ_12),
|
||||
safe_for_read(READ_16),
|
||||
safe_for_read(READ_BUFFER),
|
||||
safe_for_read(READ_DEFECT_DATA),
|
||||
safe_for_read(READ_LONG),
|
||||
safe_for_read(INQUIRY),
|
||||
safe_for_read(MODE_SENSE),
|
||||
|
|
|
@ -790,7 +790,7 @@ static void sbp2_host_reset(struct hpsb_host *host)
|
|||
static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
|
||||
{
|
||||
struct sbp2scsi_host_info *hi = scsi_id->hi;
|
||||
struct scsi_device *sdev;
|
||||
int error;
|
||||
|
||||
SBP2_DEBUG("sbp2_start_device");
|
||||
|
||||
|
@ -939,10 +939,10 @@ alloc_fail:
|
|||
sbp2_max_speed_and_size(scsi_id);
|
||||
|
||||
/* Add this device to the scsi layer now */
|
||||
sdev = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
|
||||
if (IS_ERR(sdev)) {
|
||||
error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
|
||||
if (error) {
|
||||
SBP2_ERR("scsi_add_device failed");
|
||||
return PTR_ERR(sdev);
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -59,6 +59,7 @@
|
|||
Fix 'handled=1' ISR usage, remove bogus IRQ check.
|
||||
Remove un-needed eh_abort handler.
|
||||
Add support for embedded firmware error strings.
|
||||
2.26.02.003 - Correctly handle single sgl's with use_sg=1.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
|
@ -81,7 +82,7 @@
|
|||
#include "3w-9xxx.h"
|
||||
|
||||
/* Globals */
|
||||
#define TW_DRIVER_VERSION "2.26.02.002"
|
||||
#define TW_DRIVER_VERSION "2.26.02.003"
|
||||
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
|
||||
static unsigned int twa_device_extension_count;
|
||||
static int twa_major = -1;
|
||||
|
@ -1805,6 +1806,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
|
|||
if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) {
|
||||
command_packet->sg_list[0].address = tw_dev->generic_buffer_phys[request_id];
|
||||
command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH;
|
||||
if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)
|
||||
memcpy(tw_dev->generic_buffer_virt[request_id], tw_dev->srb[request_id]->request_buffer, tw_dev->srb[request_id]->request_bufflen);
|
||||
} else {
|
||||
buffaddr = twa_map_scsi_single_data(tw_dev, request_id);
|
||||
if (buffaddr == 0)
|
||||
|
@ -1823,6 +1826,12 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
|
|||
|
||||
if (tw_dev->srb[request_id]->use_sg > 0) {
|
||||
if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) {
|
||||
if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL) {
|
||||
struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
|
||||
char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
|
||||
memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
|
||||
kunmap_atomic(buf - sg->offset, KM_IRQ0);
|
||||
}
|
||||
command_packet->sg_list[0].address = tw_dev->generic_buffer_phys[request_id];
|
||||
command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH;
|
||||
} else {
|
||||
|
@ -1888,11 +1897,20 @@ out:
|
|||
/* This function completes an execute scsi operation */
|
||||
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
|
||||
{
|
||||
/* Copy the response if too small */
|
||||
if ((tw_dev->srb[request_id]->request_buffer) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) {
|
||||
memcpy(tw_dev->srb[request_id]->request_buffer,
|
||||
tw_dev->generic_buffer_virt[request_id],
|
||||
tw_dev->srb[request_id]->request_bufflen);
|
||||
if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH &&
|
||||
(tw_dev->srb[request_id]->sc_data_direction == DMA_FROM_DEVICE ||
|
||||
tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)) {
|
||||
if (tw_dev->srb[request_id]->use_sg == 0) {
|
||||
memcpy(tw_dev->srb[request_id]->request_buffer,
|
||||
tw_dev->generic_buffer_virt[request_id],
|
||||
tw_dev->srb[request_id]->request_bufflen);
|
||||
}
|
||||
if (tw_dev->srb[request_id]->use_sg == 1) {
|
||||
struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
|
||||
char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
|
||||
memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
|
||||
kunmap_atomic(buf - sg->offset, KM_IRQ0);
|
||||
}
|
||||
}
|
||||
} /* End twa_scsiop_execute_scsi_complete() */
|
||||
|
||||
|
|
|
@ -235,6 +235,13 @@ config SCSI_ISCSI_ATTRS
|
|||
each attached iSCSI device to sysfs, say Y.
|
||||
Otherwise, say N.
|
||||
|
||||
config SCSI_SAS_ATTRS
|
||||
tristate "SAS Transport Attributes"
|
||||
depends on SCSI
|
||||
help
|
||||
If you wish to export transport-specific information about
|
||||
each attached SAS device to sysfs, say Y.
|
||||
|
||||
endmenu
|
||||
|
||||
menu "SCSI low-level drivers"
|
||||
|
|
|
@ -31,6 +31,7 @@ obj-$(CONFIG_RAID_ATTRS) += raid_class.o
|
|||
obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o
|
||||
obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o
|
||||
obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
|
||||
obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
|
||||
|
||||
obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o
|
||||
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
|
||||
|
|
|
@ -966,21 +966,21 @@ static void
|
|||
lpfc_get_host_fabric_name (struct Scsi_Host *shost)
|
||||
{
|
||||
struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
|
||||
u64 nodename;
|
||||
u64 node_name;
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
|
||||
if ((phba->fc_flag & FC_FABRIC) ||
|
||||
((phba->fc_topology == TOPOLOGY_LOOP) &&
|
||||
(phba->fc_flag & FC_PUBLIC_LOOP)))
|
||||
memcpy(&nodename, &phba->fc_fabparam.nodeName, sizeof(u64));
|
||||
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn);
|
||||
else
|
||||
/* fabric is local port if there is no F/FL_Port */
|
||||
memcpy(&nodename, &phba->fc_nodename, sizeof(u64));
|
||||
node_name = wwn_to_u64(phba->fc_nodename.wwn);
|
||||
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
fc_host_fabric_name(shost) = be64_to_cpu(nodename);
|
||||
fc_host_fabric_name(shost) = node_name;
|
||||
}
|
||||
|
||||
|
||||
|
@ -1103,21 +1103,20 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
|
|||
{
|
||||
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
|
||||
struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
|
||||
uint64_t node_name = 0;
|
||||
u64 node_name = 0;
|
||||
struct lpfc_nodelist *ndlp = NULL;
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
/* Search the mapped list for this target ID */
|
||||
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
|
||||
if (starget->id == ndlp->nlp_sid) {
|
||||
memcpy(&node_name, &ndlp->nlp_nodename,
|
||||
sizeof(struct lpfc_name));
|
||||
node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
fc_starget_node_name(starget) = be64_to_cpu(node_name);
|
||||
fc_starget_node_name(starget) = node_name;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1125,21 +1124,20 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
|
|||
{
|
||||
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
|
||||
struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
|
||||
uint64_t port_name = 0;
|
||||
u64 port_name = 0;
|
||||
struct lpfc_nodelist *ndlp = NULL;
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
/* Search the mapped list for this target ID */
|
||||
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
|
||||
if (starget->id == ndlp->nlp_sid) {
|
||||
memcpy(&port_name, &ndlp->nlp_portname,
|
||||
sizeof(struct lpfc_name));
|
||||
port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
fc_starget_port_name(starget) = be64_to_cpu(port_name);
|
||||
fc_starget_port_name(starget) = port_name;
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
|
@ -1017,13 +1017,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
|
|||
struct fc_rport *rport;
|
||||
struct lpfc_rport_data *rdata;
|
||||
struct fc_rport_identifiers rport_ids;
|
||||
uint64_t wwn;
|
||||
|
||||
/* Remote port has reappeared. Re-register w/ FC transport */
|
||||
memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t));
|
||||
rport_ids.node_name = be64_to_cpu(wwn);
|
||||
memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
|
||||
rport_ids.port_name = be64_to_cpu(wwn);
|
||||
rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
|
||||
rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
|
||||
rport_ids.port_id = ndlp->nlp_DID;
|
||||
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
|
||||
if (ndlp->nlp_type & NLP_FCP_TARGET)
|
||||
|
|
|
@ -262,12 +262,14 @@ struct lpfc_sli_ct_request {
|
|||
#define FF_FRAME_SIZE 2048
|
||||
|
||||
struct lpfc_name {
|
||||
union {
|
||||
struct {
|
||||
#ifdef __BIG_ENDIAN_BITFIELD
|
||||
uint8_t nameType:4; /* FC Word 0, bit 28:31 */
|
||||
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
|
||||
uint8_t nameType:4; /* FC Word 0, bit 28:31 */
|
||||
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
|
||||
#else /* __LITTLE_ENDIAN_BITFIELD */
|
||||
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
|
||||
uint8_t nameType:4; /* FC Word 0, bit 28:31 */
|
||||
uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
|
||||
uint8_t nameType:4; /* FC Word 0, bit 28:31 */
|
||||
#endif
|
||||
|
||||
#define NAME_IEEE 0x1 /* IEEE name - nameType */
|
||||
|
@ -276,8 +278,11 @@ struct lpfc_name {
|
|||
#define NAME_IP_TYPE 0x4 /* IP address */
|
||||
#define NAME_CCITT_TYPE 0xC
|
||||
#define NAME_CCITT_GR_TYPE 0xE
|
||||
uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
|
||||
uint8_t IEEE[6]; /* FC IEEE address */
|
||||
uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
|
||||
uint8_t IEEE[6]; /* FC IEEE address */
|
||||
};
|
||||
uint8_t wwn[8];
|
||||
};
|
||||
};
|
||||
|
||||
struct csp {
|
||||
|
|
|
@ -1333,7 +1333,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
unsigned long bar0map_len, bar2map_len;
|
||||
int error = -ENODEV, retval;
|
||||
int i;
|
||||
u64 wwname;
|
||||
|
||||
if (pci_enable_device(pdev))
|
||||
goto out;
|
||||
|
@ -1524,10 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
* Must done after lpfc_sli_hba_setup()
|
||||
*/
|
||||
|
||||
memcpy(&wwname, &phba->fc_nodename, sizeof(u64));
|
||||
fc_host_node_name(host) = be64_to_cpu(wwname);
|
||||
memcpy(&wwname, &phba->fc_portname, sizeof(u64));
|
||||
fc_host_port_name(host) = be64_to_cpu(wwname);
|
||||
fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn);
|
||||
fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn);
|
||||
fc_host_supported_classes(host) = FC_COS_CLASS3;
|
||||
|
||||
memset(fc_host_supported_fc4s(host), 0,
|
||||
|
|
|
@ -360,16 +360,16 @@ qla2x00_get_starget_node_name(struct scsi_target *starget)
|
|||
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
|
||||
scsi_qla_host_t *ha = to_qla_host(host);
|
||||
fc_port_t *fcport;
|
||||
uint64_t node_name = 0;
|
||||
u64 node_name = 0;
|
||||
|
||||
list_for_each_entry(fcport, &ha->fcports, list) {
|
||||
if (starget->id == fcport->os_target_id) {
|
||||
node_name = *(uint64_t *)fcport->node_name;
|
||||
node_name = wwn_to_u64(fcport->node_name);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fc_starget_node_name(starget) = be64_to_cpu(node_name);
|
||||
fc_starget_node_name(starget) = node_name;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -378,16 +378,16 @@ qla2x00_get_starget_port_name(struct scsi_target *starget)
|
|||
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
|
||||
scsi_qla_host_t *ha = to_qla_host(host);
|
||||
fc_port_t *fcport;
|
||||
uint64_t port_name = 0;
|
||||
u64 port_name = 0;
|
||||
|
||||
list_for_each_entry(fcport, &ha->fcports, list) {
|
||||
if (starget->id == fcport->os_target_id) {
|
||||
port_name = *(uint64_t *)fcport->port_name;
|
||||
port_name = wwn_to_u64(fcport->port_name);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fc_starget_port_name(starget) = be64_to_cpu(port_name);
|
||||
fc_starget_port_name(starget) = port_name;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -460,9 +460,7 @@ struct fc_function_template qla2xxx_transport_functions = {
|
|||
void
|
||||
qla2x00_init_host_attr(scsi_qla_host_t *ha)
|
||||
{
|
||||
fc_host_node_name(ha->host) =
|
||||
be64_to_cpu(*(uint64_t *)ha->init_cb->node_name);
|
||||
fc_host_port_name(ha->host) =
|
||||
be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
|
||||
fc_host_node_name(ha->host) = wwn_to_u64(ha->init_cb->node_name);
|
||||
fc_host_port_name(ha->host) = wwn_to_u64(ha->init_cb->port_name);
|
||||
fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
|
||||
}
|
||||
|
|
|
@ -2066,8 +2066,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
|
|||
return;
|
||||
}
|
||||
|
||||
rport_ids.node_name = be64_to_cpu(*(uint64_t *)fcport->node_name);
|
||||
rport_ids.port_name = be64_to_cpu(*(uint64_t *)fcport->port_name);
|
||||
rport_ids.node_name = wwn_to_u64(fcport->node_name);
|
||||
rport_ids.port_name = wwn_to_u64(fcport->port_name);
|
||||
rport_ids.port_id = fcport->d_id.b.domain << 16 |
|
||||
fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
|
||||
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
|
||||
|
|
|
@ -97,6 +97,30 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
|
|||
}
|
||||
|
||||
static void scsi_run_queue(struct request_queue *q);
|
||||
static void scsi_release_buffers(struct scsi_cmnd *cmd);
|
||||
|
||||
/*
|
||||
* Function: scsi_unprep_request()
|
||||
*
|
||||
* Purpose: Remove all preparation done for a request, including its
|
||||
* associated scsi_cmnd, so that it can be requeued.
|
||||
*
|
||||
* Arguments: req - request to unprepare
|
||||
*
|
||||
* Lock status: Assumed that no locks are held upon entry.
|
||||
*
|
||||
* Returns: Nothing.
|
||||
*/
|
||||
static void scsi_unprep_request(struct request *req)
|
||||
{
|
||||
struct scsi_cmnd *cmd = req->special;
|
||||
|
||||
req->flags &= ~REQ_DONTPREP;
|
||||
req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
|
||||
|
||||
scsi_release_buffers(cmd);
|
||||
scsi_put_command(cmd);
|
||||
}
|
||||
|
||||
/*
|
||||
* Function: scsi_queue_insert()
|
||||
|
@ -116,12 +140,14 @@ static void scsi_run_queue(struct request_queue *q);
|
|||
* commands.
|
||||
* Notes: This could be called either from an interrupt context or a
|
||||
* normal process context.
|
||||
* Notes: Upon return, cmd is a stale pointer.
|
||||
*/
|
||||
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
|
||||
{
|
||||
struct Scsi_Host *host = cmd->device->host;
|
||||
struct scsi_device *device = cmd->device;
|
||||
struct request_queue *q = device->request_queue;
|
||||
struct request *req = cmd->request;
|
||||
unsigned long flags;
|
||||
|
||||
SCSI_LOG_MLQUEUE(1,
|
||||
|
@ -162,8 +188,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
|
|||
* function. The SCSI request function detects the blocked condition
|
||||
* and plugs the queue appropriately.
|
||||
*/
|
||||
scsi_unprep_request(req);
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
blk_requeue_request(q, cmd->request);
|
||||
blk_requeue_request(q, req);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
scsi_run_queue(q);
|
||||
|
@ -339,7 +366,7 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
|
|||
int result;
|
||||
|
||||
if (sshdr) {
|
||||
sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
|
||||
sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
|
||||
if (!sense)
|
||||
return DRIVER_ERROR << 24;
|
||||
memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
|
@ -552,15 +579,16 @@ static void scsi_run_queue(struct request_queue *q)
|
|||
* I/O errors in the middle of the request, in which case
|
||||
* we need to request the blocks that come after the bad
|
||||
* sector.
|
||||
* Notes: Upon return, cmd is a stale pointer.
|
||||
*/
|
||||
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct request *req = cmd->request;
|
||||
unsigned long flags;
|
||||
|
||||
cmd->request->flags &= ~REQ_DONTPREP;
|
||||
|
||||
scsi_unprep_request(req);
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
blk_requeue_request(q, cmd->request);
|
||||
blk_requeue_request(q, req);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
scsi_run_queue(q);
|
||||
|
@ -595,13 +623,14 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
|
|||
*
|
||||
* Lock status: Assumed that lock is not held upon entry.
|
||||
*
|
||||
* Returns: cmd if requeue done or required, NULL otherwise
|
||||
* Returns: cmd if requeue required, NULL otherwise.
|
||||
*
|
||||
* Notes: This is called for block device requests in order to
|
||||
* mark some number of sectors as complete.
|
||||
*
|
||||
* We are guaranteeing that the request queue will be goosed
|
||||
* at some point during this call.
|
||||
* Notes: If cmd was requeued, upon return it will be a stale pointer.
|
||||
*/
|
||||
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
|
||||
int bytes, int requeue)
|
||||
|
@ -624,14 +653,15 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
|
|||
if (!uptodate && blk_noretry_request(req))
|
||||
end_that_request_chunk(req, 0, leftover);
|
||||
else {
|
||||
if (requeue)
|
||||
if (requeue) {
|
||||
/*
|
||||
* Bleah. Leftovers again. Stick the
|
||||
* leftovers in the front of the
|
||||
* queue, and goose the queue again.
|
||||
*/
|
||||
scsi_requeue_command(q, cmd);
|
||||
|
||||
cmd = NULL;
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
}
|
||||
|
@ -857,15 +887,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
|
|||
* requeueing right here - we will requeue down below
|
||||
* when we handle the bad sectors.
|
||||
*/
|
||||
cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
|
||||
|
||||
/*
|
||||
* If the command completed without error, then either finish off the
|
||||
* rest of the command, or start a new one.
|
||||
* If the command completed without error, then either
|
||||
* finish off the rest of the command, or start a new one.
|
||||
*/
|
||||
if (result == 0 || cmd == NULL ) {
|
||||
if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
|
||||
return;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Now, if we were good little boys and girls, Santa left us a request
|
||||
|
@ -880,7 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
|
|||
* and quietly refuse further access.
|
||||
*/
|
||||
cmd->device->changed = 1;
|
||||
cmd = scsi_end_request(cmd, 0,
|
||||
scsi_end_request(cmd, 0,
|
||||
this_count, 1);
|
||||
return;
|
||||
} else {
|
||||
|
@ -914,7 +942,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
|
|||
scsi_requeue_command(q, cmd);
|
||||
result = 0;
|
||||
} else {
|
||||
cmd = scsi_end_request(cmd, 0, this_count, 1);
|
||||
scsi_end_request(cmd, 0, this_count, 1);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
|
@ -931,7 +959,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
|
|||
dev_printk(KERN_INFO,
|
||||
&cmd->device->sdev_gendev,
|
||||
"Device not ready.\n");
|
||||
cmd = scsi_end_request(cmd, 0, this_count, 1);
|
||||
scsi_end_request(cmd, 0, this_count, 1);
|
||||
return;
|
||||
case VOLUME_OVERFLOW:
|
||||
if (!(req->flags & REQ_QUIET)) {
|
||||
|
@ -941,7 +969,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
|
|||
__scsi_print_command(cmd->data_cmnd);
|
||||
scsi_print_sense("", cmd);
|
||||
}
|
||||
cmd = scsi_end_request(cmd, 0, block_bytes, 1);
|
||||
scsi_end_request(cmd, 0, block_bytes, 1);
|
||||
return;
|
||||
default:
|
||||
break;
|
||||
|
@ -972,7 +1000,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
|
|||
block_bytes = req->hard_cur_sectors << 9;
|
||||
if (!block_bytes)
|
||||
block_bytes = req->data_len;
|
||||
cmd = scsi_end_request(cmd, 0, block_bytes, 1);
|
||||
scsi_end_request(cmd, 0, block_bytes, 1);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_io_completion);
|
||||
|
@ -1118,7 +1146,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
|
|||
if (unlikely(!scsi_device_online(sdev))) {
|
||||
printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
|
||||
sdev->host->host_no, sdev->id, sdev->lun);
|
||||
return BLKPREP_KILL;
|
||||
goto kill;
|
||||
}
|
||||
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
|
||||
/* OK, we're not in a running state don't prep
|
||||
|
@ -1128,7 +1156,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
|
|||
* at all allowed down */
|
||||
printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
|
||||
sdev->host->host_no, sdev->id, sdev->lun);
|
||||
return BLKPREP_KILL;
|
||||
goto kill;
|
||||
}
|
||||
/* OK, we only allow special commands (i.e. not
|
||||
* user initiated ones */
|
||||
|
@ -1160,11 +1188,11 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
|
|||
if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
|
||||
if(specials_only == SDEV_QUIESCE ||
|
||||
specials_only == SDEV_BLOCK)
|
||||
return BLKPREP_DEFER;
|
||||
goto defer;
|
||||
|
||||
printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
|
||||
sdev->host->host_no, sdev->id, sdev->lun);
|
||||
return BLKPREP_KILL;
|
||||
goto kill;
|
||||
}
|
||||
|
||||
|
||||
|
@ -1182,7 +1210,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
|
|||
cmd->tag = req->tag;
|
||||
} else {
|
||||
blk_dump_rq_flags(req, "SCSI bad req");
|
||||
return BLKPREP_KILL;
|
||||
goto kill;
|
||||
}
|
||||
|
||||
/* note the overloading of req->special. When the tag
|
||||
|
@ -1220,8 +1248,13 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
|
|||
* required).
|
||||
*/
|
||||
ret = scsi_init_io(cmd);
|
||||
if (ret) /* BLKPREP_KILL return also releases the command */
|
||||
return ret;
|
||||
switch(ret) {
|
||||
case BLKPREP_KILL:
|
||||
/* BLKPREP_KILL return also releases the command */
|
||||
goto kill;
|
||||
case BLKPREP_DEFER:
|
||||
goto defer;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the actual SCSI command for this request.
|
||||
|
@ -1231,7 +1264,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
|
|||
if (unlikely(!drv->init_command(cmd))) {
|
||||
scsi_release_buffers(cmd);
|
||||
scsi_put_command(cmd);
|
||||
return BLKPREP_KILL;
|
||||
goto kill;
|
||||
}
|
||||
} else {
|
||||
memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
|
||||
|
@ -1262,6 +1295,9 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
|
|||
if (sdev->device_busy == 0)
|
||||
blk_plug_device(q);
|
||||
return BLKPREP_DEFER;
|
||||
kill:
|
||||
req->errors = DID_NO_CONNECT << 16;
|
||||
return BLKPREP_KILL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1336,19 +1372,24 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
|
|||
}
|
||||
|
||||
/*
|
||||
* Kill requests for a dead device
|
||||
* Kill a request for a dead device
|
||||
*/
|
||||
static void scsi_kill_requests(request_queue_t *q)
|
||||
static void scsi_kill_request(struct request *req, request_queue_t *q)
|
||||
{
|
||||
struct request *req;
|
||||
struct scsi_cmnd *cmd = req->special;
|
||||
|
||||
while ((req = elv_next_request(q)) != NULL) {
|
||||
blkdev_dequeue_request(req);
|
||||
req->flags |= REQ_QUIET;
|
||||
while (end_that_request_first(req, 0, req->nr_sectors))
|
||||
;
|
||||
end_that_request_last(req);
|
||||
blkdev_dequeue_request(req);
|
||||
|
||||
if (unlikely(cmd == NULL)) {
|
||||
printk(KERN_CRIT "impossible request in %s.\n",
|
||||
__FUNCTION__);
|
||||
BUG();
|
||||
}
|
||||
|
||||
scsi_init_cmd_errh(cmd);
|
||||
cmd->result = DID_NO_CONNECT << 16;
|
||||
atomic_inc(&cmd->device->iorequest_cnt);
|
||||
__scsi_done(cmd);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1371,7 +1412,8 @@ static void scsi_request_fn(struct request_queue *q)
|
|||
|
||||
if (!sdev) {
|
||||
printk("scsi: killing requests for dead queue\n");
|
||||
scsi_kill_requests(q);
|
||||
while ((req = elv_next_request(q)) != NULL)
|
||||
scsi_kill_request(req, q);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1398,11 +1440,7 @@ static void scsi_request_fn(struct request_queue *q)
|
|||
if (unlikely(!scsi_device_online(sdev))) {
|
||||
printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
|
||||
sdev->host->host_no, sdev->id, sdev->lun);
|
||||
blkdev_dequeue_request(req);
|
||||
req->flags |= REQ_QUIET;
|
||||
while (end_that_request_first(req, 0, req->nr_sectors))
|
||||
;
|
||||
end_that_request_last(req);
|
||||
scsi_kill_request(req, q);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -1415,6 +1453,14 @@ static void scsi_request_fn(struct request_queue *q)
|
|||
sdev->device_busy++;
|
||||
|
||||
spin_unlock(q->queue_lock);
|
||||
cmd = req->special;
|
||||
if (unlikely(cmd == NULL)) {
|
||||
printk(KERN_CRIT "impossible request in %s.\n"
|
||||
"please mail a stack trace to "
|
||||
"linux-scsi@vger.kernel.org",
|
||||
__FUNCTION__);
|
||||
BUG();
|
||||
}
|
||||
spin_lock(shost->host_lock);
|
||||
|
||||
if (!scsi_host_queue_ready(q, shost, sdev))
|
||||
|
@ -1433,15 +1479,6 @@ static void scsi_request_fn(struct request_queue *q)
|
|||
*/
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
cmd = req->special;
|
||||
if (unlikely(cmd == NULL)) {
|
||||
printk(KERN_CRIT "impossible request in %s.\n"
|
||||
"please mail a stack trace to "
|
||||
"linux-scsi@vger.kernel.org",
|
||||
__FUNCTION__);
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Finally, initialize any error handling parameters, and set up
|
||||
* the timers for timeouts.
|
||||
|
@ -1477,6 +1514,7 @@ static void scsi_request_fn(struct request_queue *q)
|
|||
* cases (host limits or settings) should run the queue at some
|
||||
* later time.
|
||||
*/
|
||||
scsi_unprep_request(req);
|
||||
spin_lock_irq(q->queue_lock);
|
||||
blk_requeue_request(q, req);
|
||||
sdev->device_busy--;
|
||||
|
|
|
@ -124,6 +124,7 @@ extern void scsi_sysfs_unregister(void);
|
|||
extern void scsi_sysfs_device_initialize(struct scsi_device *);
|
||||
extern int scsi_sysfs_target_initialize(struct scsi_device *);
|
||||
extern struct scsi_transport_template blank_transport_template;
|
||||
extern void __scsi_remove_device(struct scsi_device *);
|
||||
|
||||
extern struct bus_type scsi_bus_type;
|
||||
|
||||
|
|
|
@ -870,8 +870,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
|
|||
out_free_sdev:
|
||||
if (res == SCSI_SCAN_LUN_PRESENT) {
|
||||
if (sdevp) {
|
||||
scsi_device_get(sdev);
|
||||
*sdevp = sdev;
|
||||
if (scsi_device_get(sdev) == 0) {
|
||||
*sdevp = sdev;
|
||||
} else {
|
||||
__scsi_remove_device(sdev);
|
||||
res = SCSI_SCAN_NO_RESPONSE;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (sdev->host->hostt->slave_destroy)
|
||||
|
@ -1260,6 +1264,19 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
|
|||
}
|
||||
EXPORT_SYMBOL(__scsi_add_device);
|
||||
|
||||
int scsi_add_device(struct Scsi_Host *host, uint channel,
|
||||
uint target, uint lun)
|
||||
{
|
||||
struct scsi_device *sdev =
|
||||
__scsi_add_device(host, channel, target, lun, NULL);
|
||||
if (IS_ERR(sdev))
|
||||
return PTR_ERR(sdev);
|
||||
|
||||
scsi_device_put(sdev);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_add_device);
|
||||
|
||||
void scsi_rescan_device(struct device *dev)
|
||||
{
|
||||
struct scsi_driver *drv;
|
||||
|
@ -1276,27 +1293,8 @@ void scsi_rescan_device(struct device *dev)
|
|||
}
|
||||
EXPORT_SYMBOL(scsi_rescan_device);
|
||||
|
||||
/**
|
||||
* scsi_scan_target - scan a target id, possibly including all LUNs on the
|
||||
* target.
|
||||
* @sdevsca: Scsi_Device handle for scanning
|
||||
* @shost: host to scan
|
||||
* @channel: channel to scan
|
||||
* @id: target id to scan
|
||||
*
|
||||
* Description:
|
||||
* Scan the target id on @shost, @channel, and @id. Scan at least LUN
|
||||
* 0, and possibly all LUNs on the target id.
|
||||
*
|
||||
* Use the pre-allocated @sdevscan as a handle for the scanning. This
|
||||
* function sets sdevscan->host, sdevscan->id and sdevscan->lun; the
|
||||
* scanning functions modify sdevscan->lun.
|
||||
*
|
||||
* First try a REPORT LUN scan, if that does not scan the target, do a
|
||||
* sequential scan of LUNs on the target id.
|
||||
**/
|
||||
void scsi_scan_target(struct device *parent, unsigned int channel,
|
||||
unsigned int id, unsigned int lun, int rescan)
|
||||
static void __scsi_scan_target(struct device *parent, unsigned int channel,
|
||||
unsigned int id, unsigned int lun, int rescan)
|
||||
{
|
||||
struct Scsi_Host *shost = dev_to_shost(parent);
|
||||
int bflags = 0;
|
||||
|
@ -1310,9 +1308,7 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
|
|||
*/
|
||||
return;
|
||||
|
||||
|
||||
starget = scsi_alloc_target(parent, channel, id);
|
||||
|
||||
if (!starget)
|
||||
return;
|
||||
|
||||
|
@ -1358,6 +1354,33 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
|
|||
|
||||
put_device(&starget->dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* scsi_scan_target - scan a target id, possibly including all LUNs on the
|
||||
* target.
|
||||
* @parent: host to scan
|
||||
* @channel: channel to scan
|
||||
* @id: target id to scan
|
||||
* @lun: Specific LUN to scan or SCAN_WILD_CARD
|
||||
* @rescan: passed to LUN scanning routines
|
||||
*
|
||||
* Description:
|
||||
* Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
|
||||
* and possibly all LUNs on the target id.
|
||||
*
|
||||
* First try a REPORT LUN scan, if that does not scan the target, do a
|
||||
* sequential scan of LUNs on the target id.
|
||||
**/
|
||||
void scsi_scan_target(struct device *parent, unsigned int channel,
|
||||
unsigned int id, unsigned int lun, int rescan)
|
||||
{
|
||||
struct Scsi_Host *shost = dev_to_shost(parent);
|
||||
|
||||
down(&shost->scan_mutex);
|
||||
if (scsi_host_scan_allowed(shost))
|
||||
__scsi_scan_target(parent, channel, id, lun, rescan);
|
||||
up(&shost->scan_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_scan_target);
|
||||
|
||||
static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
|
||||
|
@ -1383,10 +1406,12 @@ static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
|
|||
order_id = shost->max_id - id - 1;
|
||||
else
|
||||
order_id = id;
|
||||
scsi_scan_target(&shost->shost_gendev, channel, order_id, lun, rescan);
|
||||
__scsi_scan_target(&shost->shost_gendev, channel,
|
||||
order_id, lun, rescan);
|
||||
}
|
||||
else
|
||||
scsi_scan_target(&shost->shost_gendev, channel, id, lun, rescan);
|
||||
__scsi_scan_target(&shost->shost_gendev, channel,
|
||||
id, lun, rescan);
|
||||
}
|
||||
|
||||
int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
|
||||
|
@ -1484,12 +1509,15 @@ void scsi_forget_host(struct Scsi_Host *shost)
|
|||
*/
|
||||
struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
|
||||
{
|
||||
struct scsi_device *sdev;
|
||||
struct scsi_device *sdev = NULL;
|
||||
struct scsi_target *starget;
|
||||
|
||||
down(&shost->scan_mutex);
|
||||
if (!scsi_host_scan_allowed(shost))
|
||||
goto out;
|
||||
starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
|
||||
if (!starget)
|
||||
return NULL;
|
||||
goto out;
|
||||
|
||||
sdev = scsi_alloc_sdev(starget, 0, NULL);
|
||||
if (sdev) {
|
||||
|
@ -1497,6 +1525,8 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
|
|||
sdev->borken = 0;
|
||||
}
|
||||
put_device(&starget->dev);
|
||||
out:
|
||||
up(&shost->scan_mutex);
|
||||
return sdev;
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_get_host_dev);
|
||||
|
|
|
@ -653,7 +653,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
|
|||
error = attr_add(&sdev->sdev_gendev,
|
||||
sdev->host->hostt->sdev_attrs[i]);
|
||||
if (error) {
|
||||
scsi_remove_device(sdev);
|
||||
__scsi_remove_device(sdev);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -667,7 +667,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
|
|||
scsi_sysfs_sdev_attrs[i]);
|
||||
error = device_create_file(&sdev->sdev_gendev, attr);
|
||||
if (error) {
|
||||
scsi_remove_device(sdev);
|
||||
__scsi_remove_device(sdev);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -687,17 +687,10 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
|
|||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* scsi_remove_device - unregister a device from the scsi bus
|
||||
* @sdev: scsi_device to unregister
|
||||
**/
|
||||
void scsi_remove_device(struct scsi_device *sdev)
|
||||
void __scsi_remove_device(struct scsi_device *sdev)
|
||||
{
|
||||
struct Scsi_Host *shost = sdev->host;
|
||||
|
||||
down(&shost->scan_mutex);
|
||||
if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
|
||||
goto out;
|
||||
return;
|
||||
|
||||
class_device_unregister(&sdev->sdev_classdev);
|
||||
device_del(&sdev->sdev_gendev);
|
||||
|
@ -706,8 +699,17 @@ void scsi_remove_device(struct scsi_device *sdev)
|
|||
sdev->host->hostt->slave_destroy(sdev);
|
||||
transport_unregister_device(&sdev->sdev_gendev);
|
||||
put_device(&sdev->sdev_gendev);
|
||||
out:
|
||||
up(&shost->scan_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* scsi_remove_device - unregister a device from the scsi bus
|
||||
* @sdev: scsi_device to unregister
|
||||
**/
|
||||
void scsi_remove_device(struct scsi_device *sdev)
|
||||
{
|
||||
down(&sdev->host->scan_mutex);
|
||||
__scsi_remove_device(sdev);
|
||||
up(&sdev->host->scan_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_remove_device);
|
||||
|
||||
|
|
|
@ -0,0 +1,820 @@
|
|||
/*
|
||||
* Copyright (C) 2005 Dell Inc.
|
||||
* Released under GPL v2.
|
||||
*
|
||||
* Serial Attached SCSI (SAS) transport class.
|
||||
*
|
||||
* The SAS transport class contains common code to deal with SAS HBAs,
|
||||
* an approximated representation of SAS topologies in the driver model,
|
||||
* and various sysfs attributes to expose these topologies and management
|
||||
* interfaces to userspace.
|
||||
*
|
||||
* In addition to the basic SCSI core objects this transport class
|
||||
* introduces two additional intermediate objects: The SAS PHY
|
||||
* as represented by struct sas_phy defines an "outgoing" PHY on
|
||||
* a SAS HBA or Expander, and the SAS remote PHY represented by
|
||||
* struct sas_rphy defines an "incoming" PHY on a SAS Expander or
|
||||
* end device. Note that this is purely a software concept, the
|
||||
* underlying hardware for a PHY and a remote PHY is the exactly
|
||||
* the same.
|
||||
*
|
||||
* There is no concept of a SAS port in this code, users can see
|
||||
* what PHYs form a wide port based on the port_identifier attribute,
|
||||
* which is the same for all PHYs in a port.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/err.h>
|
||||
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_transport.h>
|
||||
#include <scsi/scsi_transport_sas.h>
|
||||
|
||||
|
||||
#define SAS_HOST_ATTRS 0
|
||||
#define SAS_PORT_ATTRS 11
|
||||
#define SAS_RPORT_ATTRS 5
|
||||
|
||||
struct sas_internal {
|
||||
struct scsi_transport_template t;
|
||||
struct sas_function_template *f;
|
||||
|
||||
struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
|
||||
struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS];
|
||||
struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
|
||||
|
||||
struct transport_container phy_attr_cont;
|
||||
struct transport_container rphy_attr_cont;
|
||||
|
||||
/*
|
||||
* The array of null terminated pointers to attributes
|
||||
* needed by scsi_sysfs.c
|
||||
*/
|
||||
struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
|
||||
struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1];
|
||||
struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
|
||||
};
|
||||
#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t)
|
||||
|
||||
struct sas_host_attrs {
|
||||
struct list_head rphy_list;
|
||||
spinlock_t lock;
|
||||
u32 next_target_id;
|
||||
};
|
||||
#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
|
||||
|
||||
|
||||
/*
|
||||
* Hack to allow attributes of the same name in different objects.
|
||||
*/
|
||||
#define SAS_CLASS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
|
||||
struct class_device_attribute class_device_attr_##_prefix##_##_name = \
|
||||
__ATTR(_name,_mode,_show,_store)


/*
 * Pretty printing helpers
 */

#define sas_bitfield_name_match(title, table)			\
static ssize_t							\
get_sas_##title##_names(u32 table_key, char *buf)		\
{								\
	char *prefix = "";					\
	ssize_t len = 0;					\
	int i;							\
								\
	for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) {	\
		if (table[i].value & table_key) {		\
			len += sprintf(buf + len, "%s%s",	\
				prefix, table[i].name);		\
			prefix = ", ";				\
		}						\
	}							\
	len += sprintf(buf + len, "\n");			\
	return len;						\
}

#define sas_bitfield_name_search(title, table)			\
static ssize_t							\
get_sas_##title##_names(u32 table_key, char *buf)		\
{								\
	ssize_t len = 0;					\
	int i;							\
								\
	for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) {	\
		if (table[i].value == table_key) {		\
			len += sprintf(buf + len, "%s",		\
				table[i].name);			\
			break;					\
		}						\
	}							\
	len += sprintf(buf + len, "\n");			\
	return len;						\
}

static struct {
	u32		value;
	char		*name;
} sas_device_type_names[] = {
	{ SAS_PHY_UNUSED,		"unused" },
	{ SAS_END_DEVICE,		"end device" },
	{ SAS_EDGE_EXPANDER_DEVICE,	"edge expander" },
	{ SAS_FANOUT_EXPANDER_DEVICE,	"fanout expander" },
};
sas_bitfield_name_search(device_type, sas_device_type_names)


static struct {
	u32		value;
	char		*name;
} sas_protocol_names[] = {
	{ SAS_PROTOCOL_SATA,		"sata" },
	{ SAS_PROTOCOL_SMP,		"smp" },
	{ SAS_PROTOCOL_STP,		"stp" },
	{ SAS_PROTOCOL_SSP,		"ssp" },
};
sas_bitfield_name_match(protocol, sas_protocol_names)

static struct {
	u32		value;
	char		*name;
} sas_linkspeed_names[] = {
	{ SAS_LINK_RATE_UNKNOWN,	"Unknown" },
	{ SAS_PHY_DISABLED,		"Phy disabled" },
	{ SAS_LINK_RATE_FAILED,		"Link Rate failed" },
	{ SAS_SATA_SPINUP_HOLD,		"Spin-up hold" },
	{ SAS_LINK_RATE_1_5_GBPS,	"1.5 Gbit" },
	{ SAS_LINK_RATE_3_0_GBPS,	"3.0 Gbit" },
};
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
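
/*
 * These expansions define get_sas_device_type_names(),
 * get_sas_protocol_names() and get_sas_linkspeed_names(), which the
 * attribute show routines below use to turn the raw enum/bitfield values
 * into sysfs strings.  For example, a target_port_protocols value of
 * (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA) is printed by
 * get_sas_protocol_names() as "sata, stp, ssp" (table order), while
 * get_sas_linkspeed_names(SAS_LINK_RATE_3_0_GBPS, buf) prints "3.0 Gbit".
 */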


/*
 * SAS host attributes
 */

static int sas_host_setup(struct transport_container *tc, struct device *dev,
			  struct class_device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);

	INIT_LIST_HEAD(&sas_host->rphy_list);
	spin_lock_init(&sas_host->lock);
	sas_host->next_target_id = 0;
	return 0;
}

static DECLARE_TRANSPORT_CLASS(sas_host_class,
		"sas_host", sas_host_setup, NULL, NULL);

static int sas_host_match(struct attribute_container *cont,
			  struct device *dev)
{
	struct Scsi_Host *shost;
	struct sas_internal *i;

	if (!scsi_is_host_device(dev))
		return 0;
	shost = dev_to_shost(dev);

	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class !=
			&sas_host_class.class)
		return 0;

	i = to_sas_internal(shost->transportt);
	return &i->t.host_attrs.ac == cont;
}

static int do_sas_phy_delete(struct device *dev, void *data)
{
	if (scsi_is_sas_phy(dev))
		sas_phy_delete(dev_to_phy(dev));
	return 0;
}

/**
 * sas_remove_host  --  tear down a Scsi_Host's SAS data structures
 * @shost:	Scsi Host that is torn down
 *
 * Removes all SAS PHYs and remote PHYs for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SAS HBAs.
 */
void sas_remove_host(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL, do_sas_phy_delete);
}
EXPORT_SYMBOL(sas_remove_host);
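
/*
 * A minimal, hypothetical LLDD removal path using this call (the driver
 * name and the example_hba structure are illustrative assumptions only):
 *
 *	static void example_hba_remove(struct example_hba *hba)
 *	{
 *		sas_remove_host(hba->shost);	// tear down PHYs/remote PHYs
 *		scsi_remove_host(hba->shost);	// then remove the SCSI host
 *		scsi_host_put(hba->shost);
 *	}
 */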


/*
 * SAS Port attributes
 */

#define sas_phy_show_simple(field, name, format_string, cast)		\
static ssize_t								\
show_sas_phy_##name(struct class_device *cdev, char *buf)		\
{									\
	struct sas_phy *phy = transport_class_to_phy(cdev);		\
									\
	return snprintf(buf, 20, format_string, cast phy->field);	\
}

#define sas_phy_simple_attr(field, name, format_string, type)		\
	sas_phy_show_simple(field, name, format_string, (type))	\
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL)

#define sas_phy_show_protocol(field, name)				\
static ssize_t								\
show_sas_phy_##name(struct class_device *cdev, char *buf)		\
{									\
	struct sas_phy *phy = transport_class_to_phy(cdev);		\
									\
	if (!phy->field)						\
		return snprintf(buf, 20, "none\n");			\
	return get_sas_protocol_names(phy->field, buf);			\
}

#define sas_phy_protocol_attr(field, name)				\
	sas_phy_show_protocol(field, name)				\
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL)

#define sas_phy_show_linkspeed(field)					\
static ssize_t								\
show_sas_phy_##field(struct class_device *cdev, char *buf)		\
{									\
	struct sas_phy *phy = transport_class_to_phy(cdev);		\
									\
	return get_sas_linkspeed_names(phy->field, buf);		\
}

#define sas_phy_linkspeed_attr(field)					\
	sas_phy_show_linkspeed(field)					\
static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL)

static ssize_t
show_sas_device_type(struct class_device *cdev, char *buf)
{
	struct sas_phy *phy = transport_class_to_phy(cdev);

	if (!phy->identify.device_type)
		return snprintf(buf, 20, "none\n");
	return get_sas_device_type_names(phy->identify.device_type, buf);
}

static CLASS_DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL);

sas_phy_protocol_attr(identify.initiator_port_protocols,
		initiator_port_protocols);
sas_phy_protocol_attr(identify.target_port_protocols,
		target_port_protocols);
sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
		unsigned long long);
sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8);
sas_phy_linkspeed_attr(negotiated_linkrate);
sas_phy_linkspeed_attr(minimum_linkrate_hw);
sas_phy_linkspeed_attr(minimum_linkrate);
sas_phy_linkspeed_attr(maximum_linkrate_hw);
sas_phy_linkspeed_attr(maximum_linkrate);


static DECLARE_TRANSPORT_CLASS(sas_phy_class,
		"sas_phy", NULL, NULL, NULL);

static int sas_phy_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct sas_internal *i;

	if (!scsi_is_sas_phy(dev))
		return 0;
	shost = dev_to_shost(dev->parent);

	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class !=
			&sas_host_class.class)
		return 0;

	i = to_sas_internal(shost->transportt);
	return &i->phy_attr_cont.ac == cont;
}

static void sas_phy_release(struct device *dev)
{
	struct sas_phy *phy = dev_to_phy(dev);

	put_device(dev->parent);
	kfree(phy);
}

/**
 * sas_phy_alloc  --  allocates and initializes a SAS PHY structure
 * @parent:	Parent device
 * @number:	Port number
 *
 * Allocates a SAS PHY structure.  It will be added to the device tree
 * below the device specified by @parent, which has to be either a Scsi_Host
 * or sas_rphy.
 *
 * Returns:
 *	SAS PHY allocated or %NULL if the allocation failed.
 */
struct sas_phy *sas_phy_alloc(struct device *parent, int number)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct sas_phy *phy;

	phy = kmalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return NULL;
	memset(phy, 0, sizeof(*phy));

	get_device(parent);

	phy->number = number;

	device_initialize(&phy->dev);
	phy->dev.parent = get_device(parent);
	phy->dev.release = sas_phy_release;
	sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number);

	transport_setup_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL(sas_phy_alloc);

/**
 * sas_phy_add  --  add a SAS PHY to the device hierarchy
 * @phy:	The PHY to be added
 *
 * Publishes a SAS PHY to the rest of the system.
 */
int sas_phy_add(struct sas_phy *phy)
{
	int error;

	error = device_add(&phy->dev);
	if (!error) {
		transport_add_device(&phy->dev);
		transport_configure_device(&phy->dev);
	}

	return error;
}
EXPORT_SYMBOL(sas_phy_add);
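
/*
 * A hypothetical discovery snippet (shost, port_no and hba_sas_address are
 * illustrative assumptions, not names defined in this file) showing how an
 * LLDD would publish one of its own PHYs:
 *
 *	struct sas_phy *phy;
 *
 *	phy = sas_phy_alloc(&shost->shost_gendev, port_no);
 *	if (!phy)
 *		return -ENOMEM;
 *	phy->identify.device_type = SAS_END_DEVICE;
 *	phy->identify.initiator_port_protocols = SAS_PROTOCOL_SSP;
 *	phy->identify.sas_address = hba_sas_address;
 *	phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
 *	if (sas_phy_add(phy)) {
 *		sas_phy_free(phy);	// only valid before a successful add
 *		return -ENODEV;
 *	}
 */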

/**
 * sas_phy_free  --  free a SAS PHY
 * @phy:	SAS PHY to free
 *
 * Frees the specified SAS PHY.
 *
 * Note:
 *   This function must only be called on a PHY that has not
 *   successfully been added using sas_phy_add().
 */
void sas_phy_free(struct sas_phy *phy)
{
	transport_destroy_device(&phy->dev);
	put_device(phy->dev.parent);
	put_device(phy->dev.parent);
	put_device(phy->dev.parent);
	kfree(phy);
}
EXPORT_SYMBOL(sas_phy_free);

/**
 * sas_phy_delete  --  remove SAS PHY
 * @phy:	SAS PHY to remove
 *
 * Removes the specified SAS PHY.  If the SAS PHY has an
 * associated remote PHY it is removed first.
 */
void
sas_phy_delete(struct sas_phy *phy)
{
	struct device *dev = &phy->dev;

	if (phy->rphy)
		sas_rphy_delete(phy->rphy);

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);
	put_device(dev->parent);
}
EXPORT_SYMBOL(sas_phy_delete);

/**
 * scsi_is_sas_phy  --  check if a struct device represents a SAS PHY
 * @dev:	device to check
 *
 * Returns:
 *	%1 if the device represents a SAS PHY, %0 else
 */
int scsi_is_sas_phy(const struct device *dev)
{
	return dev->release == sas_phy_release;
}
EXPORT_SYMBOL(scsi_is_sas_phy);

/*
 * SAS remote PHY attributes.
 */

#define sas_rphy_show_simple(field, name, format_string, cast)		\
static ssize_t								\
show_sas_rphy_##name(struct class_device *cdev, char *buf)		\
{									\
	struct sas_rphy *rphy = transport_class_to_rphy(cdev);		\
									\
	return snprintf(buf, 20, format_string, cast rphy->field);	\
}

#define sas_rphy_simple_attr(field, name, format_string, type)		\
	sas_rphy_show_simple(field, name, format_string, (type))	\
static SAS_CLASS_DEVICE_ATTR(rphy, name, S_IRUGO,			\
		show_sas_rphy_##name, NULL)

#define sas_rphy_show_protocol(field, name)				\
static ssize_t								\
show_sas_rphy_##name(struct class_device *cdev, char *buf)		\
{									\
	struct sas_rphy *rphy = transport_class_to_rphy(cdev);		\
									\
	if (!rphy->field)						\
		return snprintf(buf, 20, "none\n");			\
	return get_sas_protocol_names(rphy->field, buf);		\
}

#define sas_rphy_protocol_attr(field, name)				\
	sas_rphy_show_protocol(field, name)				\
static SAS_CLASS_DEVICE_ATTR(rphy, name, S_IRUGO,			\
		show_sas_rphy_##name, NULL)

static ssize_t
show_sas_rphy_device_type(struct class_device *cdev, char *buf)
{
	struct sas_rphy *rphy = transport_class_to_rphy(cdev);

	if (!rphy->identify.device_type)
		return snprintf(buf, 20, "none\n");
	return get_sas_device_type_names(
			rphy->identify.device_type, buf);
}

static SAS_CLASS_DEVICE_ATTR(rphy, device_type, S_IRUGO,
		show_sas_rphy_device_type, NULL);

sas_rphy_protocol_attr(identify.initiator_port_protocols,
		initiator_port_protocols);
sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
		unsigned long long);
sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);

static DECLARE_TRANSPORT_CLASS(sas_rphy_class,
		"sas_rphy", NULL, NULL, NULL);

static int sas_rphy_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct sas_internal *i;

	if (!scsi_is_sas_rphy(dev))
		return 0;
	shost = dev_to_shost(dev->parent->parent);

	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class !=
			&sas_host_class.class)
		return 0;

	i = to_sas_internal(shost->transportt);
	return &i->rphy_attr_cont.ac == cont;
}

static void sas_rphy_release(struct device *dev)
{
	struct sas_rphy *rphy = dev_to_rphy(dev);

	put_device(dev->parent);
	kfree(rphy);
}

/**
 * sas_rphy_alloc  --  allocates and initializes a SAS remote PHY structure
 * @parent:	SAS PHY this remote PHY is connected to
 *
 * Allocates a SAS remote PHY structure, connected to @parent.
 *
 * Returns:
 *	SAS remote PHY allocated or %NULL if the allocation failed.
 */
struct sas_rphy *sas_rphy_alloc(struct sas_phy *parent)
{
	struct Scsi_Host *shost = dev_to_shost(&parent->dev);
	struct sas_rphy *rphy;

	rphy = kmalloc(sizeof(*rphy), GFP_KERNEL);
	if (!rphy) {
		put_device(&parent->dev);
		return NULL;
	}
	memset(rphy, 0, sizeof(*rphy));

	device_initialize(&rphy->dev);
	rphy->dev.parent = get_device(&parent->dev);
	rphy->dev.release = sas_rphy_release;
	sprintf(rphy->dev.bus_id, "rphy-%d:%d",
		shost->host_no, parent->number);
	transport_setup_device(&rphy->dev);

	return rphy;
}
EXPORT_SYMBOL(sas_rphy_alloc);

/**
 * sas_rphy_add  --  add a SAS remote PHY to the device hierarchy
 * @rphy:	The remote PHY to be added
 *
 * Publishes a SAS remote PHY to the rest of the system.
 */
int sas_rphy_add(struct sas_rphy *rphy)
{
	struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
	struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
	struct sas_identify *identify = &rphy->identify;
	int error;

	if (parent->rphy)
		return -ENXIO;
	parent->rphy = rphy;

	error = device_add(&rphy->dev);
	if (error)
		return error;
	transport_add_device(&rphy->dev);
	transport_configure_device(&rphy->dev);

	spin_lock(&sas_host->lock);
	list_add_tail(&rphy->list, &sas_host->rphy_list);
	if (identify->device_type == SAS_END_DEVICE &&
	    (identify->target_port_protocols &
	     (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
		rphy->scsi_target_id = sas_host->next_target_id++;
	else
		rphy->scsi_target_id = -1;
	spin_unlock(&sas_host->lock);

	if (rphy->scsi_target_id != -1) {
		scsi_scan_target(&rphy->dev, parent->number,
				rphy->scsi_target_id, ~0, 0);
	}

	return 0;
}
EXPORT_SYMBOL(sas_rphy_add);
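
/*
 * A hypothetical snippet (phy and remote_sas_address are illustrative
 * assumptions) showing how a driver would report a newly discovered SSP
 * end device attached behind "phy":
 *
 *	struct sas_rphy *rphy;
 *
 *	rphy = sas_rphy_alloc(phy);
 *	if (!rphy)
 *		return -ENOMEM;
 *	rphy->identify.device_type = SAS_END_DEVICE;
 *	rphy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
 *	rphy->identify.sas_address = remote_sas_address;
 *	if (sas_rphy_add(rphy)) {
 *		sas_rphy_free(rphy);	// only valid before a successful add
 *		return -ENODEV;
 *	}
 *	// sas_rphy_add() assigns a scsi_target_id to SSP/STP/SATA end
 *	// devices and scans them for LUNs via scsi_scan_target().
 */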

/**
 * sas_rphy_free  --  free a SAS remote PHY
 * @rphy:	SAS remote PHY to free
 *
 * Frees the specified SAS remote PHY.
 *
 * Note:
 *   This function must only be called on a remote
 *   PHY that has not successfully been added using
 *   sas_rphy_add().
 */
void sas_rphy_free(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);

	spin_lock(&sas_host->lock);
	list_del(&rphy->list);
	spin_unlock(&sas_host->lock);

	transport_destroy_device(&rphy->dev);
	put_device(rphy->dev.parent);
	put_device(rphy->dev.parent);
	put_device(rphy->dev.parent);
	kfree(rphy);
}
EXPORT_SYMBOL(sas_rphy_free);

/**
 * sas_rphy_delete  --  remove SAS remote PHY
 * @rphy:	SAS remote PHY to remove
 *
 * Removes the specified SAS remote PHY.
 */
void
sas_rphy_delete(struct sas_rphy *rphy)
{
	struct device *dev = &rphy->dev;
	struct sas_phy *parent = dev_to_phy(dev->parent);
	struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);

	transport_destroy_device(&rphy->dev);

	scsi_remove_target(&rphy->dev);

	spin_lock(&sas_host->lock);
	list_del(&rphy->list);
	spin_unlock(&sas_host->lock);

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);
	put_device(&parent->dev);
}
EXPORT_SYMBOL(sas_rphy_delete);

/**
 * scsi_is_sas_rphy  --  check if a struct device represents a SAS remote PHY
 * @dev:	device to check
 *
 * Returns:
 *	%1 if the device represents a SAS remote PHY, %0 else
 */
int scsi_is_sas_rphy(const struct device *dev)
{
	return dev->release == sas_rphy_release;
}
EXPORT_SYMBOL(scsi_is_sas_rphy);


/*
 * SCSI scan helper
 */

static struct device *sas_target_parent(struct Scsi_Host *shost,
					int channel, uint id)
{
	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
	struct sas_rphy *rphy;
	struct device *dev = NULL;

	spin_lock(&sas_host->lock);
	list_for_each_entry(rphy, &sas_host->rphy_list, list) {
		struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
		if (parent->number == channel &&
		    rphy->scsi_target_id == id)
			dev = &rphy->dev;
	}
	spin_unlock(&sas_host->lock);

	return dev;
}


/*
 * Setup / Teardown code
 */

#define SETUP_RPORT_ATTRIBUTE(field)					\
	i->private_rphy_attrs[count] = class_device_attr_##field;	\
	i->private_rphy_attrs[count].attr.mode = S_IRUGO;		\
	i->private_rphy_attrs[count].store = NULL;			\
	i->rphy_attrs[count] = &i->private_rphy_attrs[count];		\
	count++

#define SETUP_PORT_ATTRIBUTE(field)					\
	i->private_phy_attrs[count] = class_device_attr_##field;	\
	i->private_phy_attrs[count].attr.mode = S_IRUGO;		\
	i->private_phy_attrs[count].store = NULL;			\
	i->phy_attrs[count] = &i->private_phy_attrs[count];		\
	count++


/**
 * sas_attach_transport  --  instantiate SAS transport template
 * @ft:		SAS transport class function template
 */
struct scsi_transport_template *
sas_attach_transport(struct sas_function_template *ft)
{
	struct sas_internal *i;
	int count;

	i = kmalloc(sizeof(struct sas_internal), GFP_KERNEL);
	if (!i)
		return NULL;
	memset(i, 0, sizeof(struct sas_internal));

	i->t.target_parent = sas_target_parent;

	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
	i->t.host_attrs.ac.class = &sas_host_class.class;
	i->t.host_attrs.ac.match = sas_host_match;
	transport_container_register(&i->t.host_attrs);
	i->t.host_size = sizeof(struct sas_host_attrs);

	i->phy_attr_cont.ac.class = &sas_phy_class.class;
	i->phy_attr_cont.ac.attrs = &i->phy_attrs[0];
	i->phy_attr_cont.ac.match = sas_phy_match;
	transport_container_register(&i->phy_attr_cont);

	i->rphy_attr_cont.ac.class = &sas_rphy_class.class;
	i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0];
	i->rphy_attr_cont.ac.match = sas_rphy_match;
	transport_container_register(&i->rphy_attr_cont);

	i->f = ft;

	count = 0;
	i->host_attrs[count] = NULL;

	count = 0;
	SETUP_PORT_ATTRIBUTE(initiator_port_protocols);
	SETUP_PORT_ATTRIBUTE(target_port_protocols);
	SETUP_PORT_ATTRIBUTE(device_type);
	SETUP_PORT_ATTRIBUTE(sas_address);
	SETUP_PORT_ATTRIBUTE(phy_identifier);
	SETUP_PORT_ATTRIBUTE(port_identifier);
	SETUP_PORT_ATTRIBUTE(negotiated_linkrate);
	SETUP_PORT_ATTRIBUTE(minimum_linkrate_hw);
	SETUP_PORT_ATTRIBUTE(minimum_linkrate);
	SETUP_PORT_ATTRIBUTE(maximum_linkrate_hw);
	SETUP_PORT_ATTRIBUTE(maximum_linkrate);
	i->phy_attrs[count] = NULL;

	count = 0;
	SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols);
	SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols);
	SETUP_RPORT_ATTRIBUTE(rphy_device_type);
	SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
	SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
	i->rphy_attrs[count] = NULL;

	return &i->t;
}
EXPORT_SYMBOL(sas_attach_transport);
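
/*
 * A hypothetical LLDD module init/exit pair (example_sas_template and the
 * example_ prefix are assumptions for illustration) tying this together:
 *
 *	static struct scsi_transport_template *example_sas_template;
 *	static struct sas_function_template example_sas_functions = { };
 *
 *	static int __init example_init(void)
 *	{
 *		example_sas_template =
 *			sas_attach_transport(&example_sas_functions);
 *		if (!example_sas_template)
 *			return -ENOMEM;
 *		// each Scsi_Host then sets shost->transportt =
 *		// example_sas_template before scsi_add_host()
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		sas_release_transport(example_sas_template);
 *	}
 */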

/**
 * sas_release_transport  --  release SAS transport template instance
 * @t:		transport template instance
 */
void sas_release_transport(struct scsi_transport_template *t)
{
	struct sas_internal *i = to_sas_internal(t);

	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->phy_attr_cont);
	transport_container_unregister(&i->rphy_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL(sas_release_transport);

static __init int sas_transport_init(void)
{
	int error;

	error = transport_class_register(&sas_host_class);
	if (error)
		goto out;
	error = transport_class_register(&sas_phy_class);
	if (error)
		goto out_unregister_transport;
	error = transport_class_register(&sas_rphy_class);
	if (error)
		goto out_unregister_phy;

	return 0;

 out_unregister_phy:
	transport_class_unregister(&sas_phy_class);
 out_unregister_transport:
	transport_class_unregister(&sas_host_class);
 out:
	return error;

}

static void __exit sas_transport_exit(void)
{
	transport_class_unregister(&sas_host_class);
	transport_class_unregister(&sas_phy_class);
	transport_class_unregister(&sas_rphy_class);
}

MODULE_AUTHOR("Christoph Hellwig");
MODULE_DESCRIPTION("SAS Transport Attributes");
MODULE_LICENSE("GPL");

module_init(sas_transport_init);
module_exit(sas_transport_exit);

@@ -61,7 +61,7 @@ static int sg_version_num = 30533;	/* 2 digits for each component */

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20050901";
static char *sg_version_date = "20050908";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);

@@ -1299,7 +1299,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
		sfp->mmap_called = 1;
	}
	vma->vm_flags |= (VM_RESERVED | VM_IO);
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;

@@ -178,8 +178,8 @@ static inline struct scsi_target *scsi_target(struct scsi_device *sdev)

extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
		uint, uint, uint, void *hostdata);
#define scsi_add_device(host, channel, target, lun) \
	__scsi_add_device(host, channel, target, lun, NULL)
extern int scsi_add_device(struct Scsi_Host *host, uint channel,
			   uint target, uint lun);
extern void scsi_remove_device(struct scsi_device *);
extern int scsi_device_cancel(struct scsi_device *, int);

@@ -439,4 +439,12 @@ int fc_remote_port_block(struct fc_rport *rport);
void fc_remote_port_unblock(struct fc_rport *rport);
int scsi_is_fc_rport(const struct device *);

static inline u64 wwn_to_u64(u8 *wwn)
{
	return (u64)wwn[0] << 56 | (u64)wwn[1] << 48 |
	    (u64)wwn[2] << 40 | (u64)wwn[3] << 32 |
	    (u64)wwn[4] << 24 | (u64)wwn[5] << 16 |
	    (u64)wwn[6] << 8 | (u64)wwn[7];
}
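
/*
 * Example (illustrative values only): a world wide name received as an
 * 8-byte big-endian array can be folded into the u64 used by the transport
 * class attributes:
 *
 *	u8 wwpn[8] = { 0x50, 0x06, 0x01, 0x60, 0x10, 0x20, 0x30, 0x40 };
 *	u64 port_name = wwn_to_u64(wwpn);	// 0x5006016010203040ULL
 */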

#endif /* SCSI_TRANSPORT_FC_H */

@@ -0,0 +1,100 @@
#ifndef SCSI_TRANSPORT_SAS_H
#define SCSI_TRANSPORT_SAS_H

#include <linux/transport_class.h>
#include <linux/types.h>

struct scsi_transport_template;
struct sas_rphy;


enum sas_device_type {
	SAS_PHY_UNUSED,
	SAS_END_DEVICE,
	SAS_EDGE_EXPANDER_DEVICE,
	SAS_FANOUT_EXPANDER_DEVICE,
};

enum sas_protocol {
	SAS_PROTOCOL_SATA	= 0x01,
	SAS_PROTOCOL_SMP	= 0x02,
	SAS_PROTOCOL_STP	= 0x04,
	SAS_PROTOCOL_SSP	= 0x08,
};

enum sas_linkrate {
	SAS_LINK_RATE_UNKNOWN,
	SAS_PHY_DISABLED,
	SAS_LINK_RATE_FAILED,
	SAS_SATA_SPINUP_HOLD,
	SAS_SATA_PORT_SELECTOR,
	SAS_LINK_RATE_1_5_GBPS,
	SAS_LINK_RATE_3_0_GBPS,
	SAS_LINK_VIRTUAL,
};

struct sas_identify {
	enum sas_device_type	device_type;
	enum sas_protocol	initiator_port_protocols;
	enum sas_protocol	target_port_protocols;
	u64			sas_address;
	u8			phy_identifier;
};

/* The functions by which the transport class and the driver communicate */
struct sas_function_template {
};

struct sas_phy {
	struct device		dev;
	int			number;
	struct sas_identify	identify;
	enum sas_linkrate	negotiated_linkrate;
	enum sas_linkrate	minimum_linkrate_hw;
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate_hw;
	enum sas_linkrate	maximum_linkrate;
	u8			port_identifier;
	struct sas_rphy		*rphy;
};

#define dev_to_phy(d) \
	container_of((d), struct sas_phy, dev)
#define transport_class_to_phy(cdev) \
	dev_to_phy((cdev)->dev)
#define phy_to_shost(phy) \
	dev_to_shost((phy)->dev.parent)

struct sas_rphy {
	struct device		dev;
	struct sas_identify	identify;
	struct list_head	list;
	u32			scsi_target_id;
};

#define dev_to_rphy(d) \
	container_of((d), struct sas_rphy, dev)
#define transport_class_to_rphy(cdev) \
	dev_to_rphy((cdev)->dev)
#define rphy_to_shost(rphy) \
	dev_to_shost((rphy)->dev.parent)

extern void sas_remove_host(struct Scsi_Host *);

extern struct sas_phy *sas_phy_alloc(struct device *, int);
extern void sas_phy_free(struct sas_phy *);
extern int sas_phy_add(struct sas_phy *);
extern void sas_phy_delete(struct sas_phy *);
extern int scsi_is_sas_phy(const struct device *);

extern struct sas_rphy *sas_rphy_alloc(struct sas_phy *);
void sas_rphy_free(struct sas_rphy *);
extern int sas_rphy_add(struct sas_rphy *);
extern void sas_rphy_delete(struct sas_rphy *);
extern int scsi_is_sas_rphy(const struct device *);

extern struct scsi_transport_template *
sas_attach_transport(struct sas_function_template *);
extern void sas_release_transport(struct scsi_transport_template *);

#endif /* SCSI_TRANSPORT_SAS_H */