2011-07-03 13:56:22 +08:00
|
|
|
/*
|
|
|
|
* This file is provided under a dual BSD/GPLv2 license. When using or
|
|
|
|
* redistributing this file, you may do so under either license.
|
|
|
|
*
|
|
|
|
* GPL LICENSE SUMMARY
|
|
|
|
*
|
|
|
|
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of version 2 of the GNU General Public License as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
* The full GNU General Public License is included in this distribution
|
|
|
|
* in the file called LICENSE.GPL.
|
|
|
|
*
|
|
|
|
* BSD LICENSE
|
|
|
|
*
|
|
|
|
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2011-05-09 08:34:44 +08:00
|
|
|
#include "host.h"
|
2011-06-02 08:10:50 +08:00
|
|
|
#include "isci.h"
|
2011-04-23 10:18:03 +08:00
|
|
|
#include "remote_device.h"
|
|
|
|
#include "remote_node_context.h"
|
2011-07-03 13:56:22 +08:00
|
|
|
#include "scu_event_codes.h"
|
|
|
|
#include "scu_task_context.h"
|
|
|
|
|
2012-02-10 17:18:44 +08:00
|
|
|
#undef C
|
|
|
|
#define C(a) (#a)
|
|
|
|
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
|
|
|
|
{
|
|
|
|
static const char * const strings[] = RNC_STATES;
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2012-02-10 17:18:44 +08:00
|
|
|
return strings[state];
|
|
|
|
}
|
|
|
|
#undef C
|
2011-07-03 13:56:22 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
*
|
2011-04-21 13:34:49 +08:00
|
|
|
* @sci_rnc: The state of the remote node context object to check.
|
2011-07-03 13:56:22 +08:00
|
|
|
*
|
|
|
|
* This method will return true if the remote node context is in a READY state
|
|
|
|
* otherwise it will return false bool true if the remote node context is in
|
|
|
|
* the ready state. false if the remote node context is not in the ready state.
|
|
|
|
*/
|
2011-07-01 10:14:33 +08:00
|
|
|
bool sci_remote_node_context_is_ready(
|
|
|
|
struct sci_remote_node_context *sci_rnc)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-06-02 08:10:43 +08:00
|
|
|
u32 current_state = sci_rnc->sm.current_state_id;
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-06-02 08:10:43 +08:00
|
|
|
if (current_state == SCI_RNC_READY) {
|
2011-07-03 13:56:22 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
/* Look up the hardware remote node context buffer for @id.  Returns NULL
 * when @id is out of range or no device currently owns that slot in the
 * host's device table.
 */
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
	if (id >= ihost->remote_node_entries || !ihost->device_table[id])
		return NULL;

	return &ihost->remote_node_context_table[id];
}
|
|
|
|
|
|
|
|
/* Program the hardware context ram entry (entries, for multi-node devices)
 * backing @sci_rnc: zero it, then fill in the addressing, timeout and open
 * address frame fields.  The entry is left marked invalid; posting it to
 * the hardware happens later via the POSTING state.
 */
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	/* NOTE(review): sci_rnc_by_id() can return NULL for a stale index —
	 * presumably rni is always valid by the time we construct; confirm.
	 */
	rnc = sci_rnc_by_id(ihost, rni);

	/* A device may own several consecutive context entries (e.g. for
	 * multi-node devices); clear them all.
	 */
	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit = false;
	/* Left invalid here; validate_context_buffer() flips this before
	 * posting the entry to the hardware.
	 */
	rnc->ssp.is_valid = false;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;

	rnc->ssp.arbitration_wait_time = 0;

	/* STP and SSP targets take their timeouts from different user
	 * parameter fields.
	 */
	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
*
|
2011-04-21 13:34:49 +08:00
|
|
|
* @sci_rnc:
|
|
|
|
* @callback:
|
2011-07-03 13:56:22 +08:00
|
|
|
* @callback_parameter:
|
|
|
|
*
|
|
|
|
* This method will setup the remote node context object so it will transition
|
|
|
|
* to its ready state. If the remote node context is already setup to
|
|
|
|
* transition to its final state then this function does nothing. none
|
|
|
|
*/
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_setup_to_resume(
|
|
|
|
struct sci_remote_node_context *sci_rnc,
|
2011-04-21 13:34:49 +08:00
|
|
|
scics_sds_remote_node_context_callback callback,
|
2011-07-03 13:56:22 +08:00
|
|
|
void *callback_parameter)
|
|
|
|
{
|
2011-04-21 13:34:49 +08:00
|
|
|
if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
|
|
|
|
sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
|
|
|
|
sci_rnc->user_callback = callback;
|
|
|
|
sci_rnc->user_cookie = callback_parameter;
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_setup_to_destory(
|
|
|
|
struct sci_remote_node_context *sci_rnc,
|
2011-04-21 13:34:49 +08:00
|
|
|
scics_sds_remote_node_context_callback callback,
|
2011-07-03 13:56:22 +08:00
|
|
|
void *callback_parameter)
|
|
|
|
{
|
2011-04-21 13:34:49 +08:00
|
|
|
sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
|
|
|
|
sci_rnc->user_callback = callback;
|
|
|
|
sci_rnc->user_cookie = callback_parameter;
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* This method just calls the user callback function and then resets the
|
|
|
|
* callback.
|
|
|
|
*/
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_notify_user(
|
|
|
|
struct sci_remote_node_context *rnc)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
|
|
|
if (rnc->user_callback != NULL) {
|
|
|
|
(*rnc->user_callback)(rnc->user_cookie);
|
|
|
|
|
|
|
|
rnc->user_callback = NULL;
|
|
|
|
rnc->user_cookie = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-05-12 23:50:23 +08:00
|
|
|
if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_resume(rnc, rnc->user_callback,
|
2011-05-12 23:50:23 +08:00
|
|
|
rnc->user_cookie);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
union scu_remote_node_context *rnc_buffer;
|
2011-07-01 07:31:37 +08:00
|
|
|
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
|
|
|
|
struct domain_device *dev = idev->domain_dev;
|
2011-07-01 10:14:33 +08:00
|
|
|
struct isci_host *ihost = idev->owning_port->owning_controller;
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
|
|
|
rnc_buffer->ssp.is_valid = true;
|
|
|
|
|
2012-02-01 16:23:10 +08:00
|
|
|
if (dev_is_sata(dev) && dev->parent) {
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
|
2011-07-03 13:56:22 +08:00
|
|
|
} else {
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2012-02-01 16:23:10 +08:00
|
|
|
if (!dev->parent)
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_port_setup_transports(idev->owning_port,
|
|
|
|
sci_rnc->remote_node_index);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
|
|
|
union scu_remote_node_context *rnc_buffer;
|
2011-07-01 10:14:33 +08:00
|
|
|
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
|
|
|
|
struct isci_host *ihost = idev->owning_port->owning_controller;
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
|
|
|
rnc_buffer->ssp.is_valid = false;
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_device_post_request(rnc_to_dev(sci_rnc),
|
|
|
|
SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-05-13 00:27:52 +08:00
|
|
|
/* Check to see if we have gotten back to the initial state because
|
|
|
|
* someone requested to destroy the remote node context object.
|
|
|
|
*/
|
2011-06-02 08:10:43 +08:00
|
|
|
if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
|
2011-07-03 13:56:22 +08:00
|
|
|
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_notify_user(rnc);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_validate_context_buffer(sci_rnc);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_invalidate_context_buffer(rnc);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
|
2011-07-01 07:31:37 +08:00
|
|
|
struct isci_remote_device *idev;
|
2011-04-22 09:44:45 +08:00
|
|
|
struct domain_device *dev;
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-07-01 07:31:37 +08:00
|
|
|
idev = rnc_to_dev(rnc);
|
|
|
|
dev = idev->domain_dev;
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-02-23 16:08:52 +08:00
|
|
|
/*
|
|
|
|
* For direct attached SATA devices we need to clear the TLCR
|
|
|
|
* NCQ to TCi tag mapping on the phy and in cases where we
|
|
|
|
* resume because of a target reset we also need to update
|
|
|
|
* the STPTLDARNI register with the RNi of the device
|
|
|
|
*/
|
2012-02-01 16:23:10 +08:00
|
|
|
if (dev_is_sata(dev) && !dev->parent)
|
|
|
|
sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
|
2011-02-23 16:08:52 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
|
|
|
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
|
|
|
|
|
2011-05-13 00:27:52 +08:00
|
|
|
if (rnc->user_callback)
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_notify_user(rnc);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_continue_state_transitions(rnc);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
|
2011-07-03 13:56:22 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
|
2011-07-03 13:56:22 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_continue_state_transitions(rnc);
|
2011-07-03 13:56:22 +08:00
|
|
|
}
|
|
|
|
|
2012-03-09 14:41:48 +08:00
|
|
|
static void sci_remote_node_context_await_suspend_state_exit(
|
|
|
|
struct sci_base_state_machine *sm)
|
|
|
|
{
|
|
|
|
struct sci_remote_node_context *rnc
|
|
|
|
= container_of(sm, typeof(*rnc), sm);
|
|
|
|
|
|
|
|
isci_dev_set_hang_detection_timeout(rnc_to_dev(rnc), 0);
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
/* State machine dispatch table for the remote node context, indexed by
 * enum scis_sds_remote_node_context_states.  States without an entry/exit
 * hook rely on sci_remote_node_context_event_handler() alone.
 */
static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		/* exit hook only: disarms the hang detection timeout */
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
|
2011-04-20 09:35:58 +08:00
|
|
|
u16 remote_node_index)
|
2011-03-27 07:43:01 +08:00
|
|
|
{
|
2011-07-01 10:14:33 +08:00
|
|
|
memset(rnc, 0, sizeof(struct sci_remote_node_context));
|
2011-03-27 07:43:01 +08:00
|
|
|
|
|
|
|
rnc->remote_node_index = remote_node_index;
|
|
|
|
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
|
2011-03-27 07:43:01 +08:00
|
|
|
}
|
2011-05-12 22:46:59 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
|
2011-05-12 22:46:59 +08:00
|
|
|
u32 event_code)
|
|
|
|
{
|
|
|
|
enum scis_sds_remote_node_context_states state;
|
|
|
|
|
2011-06-02 08:10:43 +08:00
|
|
|
state = sci_rnc->sm.current_state_id;
|
2011-05-12 22:46:59 +08:00
|
|
|
switch (state) {
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_POSTING:
|
2011-05-12 22:46:59 +08:00
|
|
|
switch (scu_get_event_code(event_code)) {
|
|
|
|
case SCU_EVENT_POST_RNC_COMPLETE:
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
|
2011-05-12 22:46:59 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
break;
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_INVALIDATING:
|
2011-05-12 22:46:59 +08:00
|
|
|
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
|
|
|
|
if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
|
2011-06-02 08:10:43 +08:00
|
|
|
state = SCI_RNC_INITIAL;
|
2011-05-12 22:46:59 +08:00
|
|
|
else
|
2011-06-02 08:10:43 +08:00
|
|
|
state = SCI_RNC_POSTING;
|
|
|
|
sci_change_state(&sci_rnc->sm, state);
|
2011-05-12 22:46:59 +08:00
|
|
|
} else {
|
|
|
|
switch (scu_get_event_type(event_code)) {
|
|
|
|
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
|
|
|
|
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
|
|
|
|
/* We really dont care if the hardware is going to suspend
|
|
|
|
* the device since it's being invalidated anyway */
|
|
|
|
dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
|
|
|
"%s: SCIC Remote Node Context 0x%p was "
|
|
|
|
"suspeneded by hardware while being "
|
|
|
|
"invalidated.\n", __func__, sci_rnc);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_RESUMING:
|
2011-05-12 22:46:59 +08:00
|
|
|
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
|
2011-05-12 22:46:59 +08:00
|
|
|
} else {
|
|
|
|
switch (scu_get_event_type(event_code)) {
|
|
|
|
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
|
|
|
|
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
|
|
|
|
/* We really dont care if the hardware is going to suspend
|
|
|
|
* the device since it's being resumed anyway */
|
|
|
|
dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
|
|
|
"%s: SCIC Remote Node Context 0x%p was "
|
|
|
|
"suspeneded by hardware while being resumed.\n",
|
|
|
|
__func__, sci_rnc);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_READY:
|
2011-05-12 22:46:59 +08:00
|
|
|
switch (scu_get_event_type(event_code)) {
|
|
|
|
case SCU_EVENT_TL_RNC_SUSPEND_TX:
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
|
2011-05-12 22:46:59 +08:00
|
|
|
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
|
|
|
|
break;
|
|
|
|
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
|
2011-05-12 22:46:59 +08:00
|
|
|
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
break;
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_AWAIT_SUSPENSION:
|
2011-05-12 22:46:59 +08:00
|
|
|
switch (scu_get_event_type(event_code)) {
|
|
|
|
case SCU_EVENT_TL_RNC_SUSPEND_TX:
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
|
2011-05-12 22:46:59 +08:00
|
|
|
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
|
|
|
|
break;
|
|
|
|
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
|
2011-05-12 22:46:59 +08:00
|
|
|
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
2012-02-10 17:05:43 +08:00
|
|
|
"%s: invalid state: %s\n", __func__,
|
|
|
|
rnc_state_name(state));
|
2011-05-12 22:46:59 +08:00
|
|
|
return SCI_FAILURE_INVALID_STATE;
|
|
|
|
}
|
|
|
|
return SCI_SUCCESS;
|
|
|
|
|
|
|
|
out:
|
|
|
|
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
2012-02-10 17:05:43 +08:00
|
|
|
"%s: code: %#x state: %s\n", __func__, event_code,
|
|
|
|
rnc_state_name(state));
|
2011-05-12 22:46:59 +08:00
|
|
|
return SCI_FAILURE;
|
|
|
|
|
|
|
|
}
|
2011-05-12 23:26:56 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
|
2011-05-12 23:26:56 +08:00
|
|
|
scics_sds_remote_node_context_callback cb_fn,
|
|
|
|
void *cb_p)
|
|
|
|
{
|
|
|
|
enum scis_sds_remote_node_context_states state;
|
|
|
|
|
2011-06-02 08:10:43 +08:00
|
|
|
state = sci_rnc->sm.current_state_id;
|
2011-05-12 23:26:56 +08:00
|
|
|
switch (state) {
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_INVALIDATING:
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
|
2011-05-12 23:26:56 +08:00
|
|
|
return SCI_SUCCESS;
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_POSTING:
|
|
|
|
case SCI_RNC_RESUMING:
|
|
|
|
case SCI_RNC_READY:
|
|
|
|
case SCI_RNC_TX_SUSPENDED:
|
|
|
|
case SCI_RNC_TX_RX_SUSPENDED:
|
|
|
|
case SCI_RNC_AWAIT_SUSPENSION:
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
|
2011-05-12 23:26:56 +08:00
|
|
|
return SCI_SUCCESS;
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_INITIAL:
|
2011-05-12 23:26:56 +08:00
|
|
|
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
2012-02-10 17:05:43 +08:00
|
|
|
"%s: invalid state: %s\n", __func__,
|
|
|
|
rnc_state_name(state));
|
2011-05-12 23:26:56 +08:00
|
|
|
/* We have decided that the destruct request on the remote node context
|
|
|
|
* can not fail since it is either in the initial/destroyed state or is
|
|
|
|
* can be destroyed.
|
|
|
|
*/
|
|
|
|
return SCI_SUCCESS;
|
|
|
|
default:
|
|
|
|
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
2012-02-10 17:05:43 +08:00
|
|
|
"%s: invalid state %s\n", __func__,
|
|
|
|
rnc_state_name(state));
|
2011-05-12 23:26:56 +08:00
|
|
|
return SCI_FAILURE_INVALID_STATE;
|
|
|
|
}
|
|
|
|
}
|
2011-05-12 23:50:23 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
|
2011-05-12 23:50:23 +08:00
|
|
|
u32 suspend_type,
|
|
|
|
scics_sds_remote_node_context_callback cb_fn,
|
|
|
|
void *cb_p)
|
|
|
|
{
|
|
|
|
enum scis_sds_remote_node_context_states state;
|
|
|
|
|
2011-06-02 08:10:43 +08:00
|
|
|
state = sci_rnc->sm.current_state_id;
|
|
|
|
if (state != SCI_RNC_READY) {
|
2011-05-12 23:50:23 +08:00
|
|
|
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
2012-02-10 17:05:43 +08:00
|
|
|
"%s: invalid state %s\n", __func__,
|
|
|
|
rnc_state_name(state));
|
2011-05-12 23:50:23 +08:00
|
|
|
return SCI_FAILURE_INVALID_STATE;
|
|
|
|
}
|
|
|
|
|
|
|
|
sci_rnc->user_callback = cb_fn;
|
|
|
|
sci_rnc->user_cookie = cb_p;
|
|
|
|
sci_rnc->suspension_code = suspend_type;
|
|
|
|
|
|
|
|
if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_device_post_request(rnc_to_dev(sci_rnc),
|
2011-05-12 23:50:23 +08:00
|
|
|
SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
|
2012-03-09 14:41:48 +08:00
|
|
|
isci_dev_set_hang_detection_timeout(rnc_to_dev(sci_rnc),
|
|
|
|
0x00000001);
|
2011-05-12 23:50:23 +08:00
|
|
|
}
|
|
|
|
|
2011-06-02 08:10:43 +08:00
|
|
|
sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
|
2011-05-12 23:50:23 +08:00
|
|
|
return SCI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
/**
 * sci_remote_node_context_resume() - request that an RNC transition to READY
 * @sci_rnc: the remote node context to resume
 * @cb_fn: callback invoked once the context reaches READY
 * @cb_p: opaque cookie passed to @cb_fn
 *
 * Behavior depends on the current state: from INITIAL the hardware buffer
 * is constructed and posted; while a transition is already in flight the
 * callback is merely (re)latched; from a suspended state the machine is
 * restarted, invalidating first for expander attached SATA devices.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE when the resume cannot
 * be accepted in the current state.
 */
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
					       scics_sds_remote_node_context_callback cb_fn,
					       void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INITIAL:
		/* No hardware slot assigned yet -> nothing to resume. */
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
		sci_remote_node_context_construct_buffer(sci_rnc);
		sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* A transition is already in flight; only accept the request
		 * when it is already headed for READY, and update the
		 * callback to deliver.
		 */
		if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
			return SCI_FAILURE_INVALID_STATE;

		sci_rnc->user_callback = cb_fn;
		sci_rnc->user_cookie = cb_p;
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED: {
		struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
		struct domain_device *dev = idev->domain_dev;

		/* If this is an expander attached SATA device we must
		 * invalidate and repost the RNC since this is the only way
		 * to clear the TCi to NCQ tag mapping table for the RNi.
		 * All other device types we can just resume.
		 */
		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);

		if (dev_is_sata(dev) && dev->parent)
			sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		else
			sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
		return SCI_SUCCESS;
	}
	case SCI_RNC_AWAIT_SUSPENSION:
		/* Latch the resume; it proceeds once suspension completes. */
		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
|
2011-05-13 00:27:52 +08:00
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
|
2011-06-28 05:57:03 +08:00
|
|
|
struct isci_request *ireq)
|
2011-05-13 00:27:52 +08:00
|
|
|
{
|
|
|
|
enum scis_sds_remote_node_context_states state;
|
|
|
|
|
2011-06-02 08:10:43 +08:00
|
|
|
state = sci_rnc->sm.current_state_id;
|
2011-06-21 05:09:22 +08:00
|
|
|
|
|
|
|
switch (state) {
|
|
|
|
case SCI_RNC_READY:
|
|
|
|
return SCI_SUCCESS;
|
|
|
|
case SCI_RNC_TX_SUSPENDED:
|
|
|
|
case SCI_RNC_TX_RX_SUSPENDED:
|
|
|
|
case SCI_RNC_AWAIT_SUSPENSION:
|
2011-05-13 00:27:52 +08:00
|
|
|
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
2012-02-10 17:05:43 +08:00
|
|
|
"%s: invalid state %s\n", __func__,
|
|
|
|
rnc_state_name(state));
|
2011-05-13 00:27:52 +08:00
|
|
|
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
|
2011-06-21 05:09:22 +08:00
|
|
|
default:
|
2012-02-10 17:05:43 +08:00
|
|
|
dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
|
|
|
"%s: invalid state %s\n", __func__,
|
|
|
|
rnc_state_name(state));
|
|
|
|
return SCI_FAILURE_INVALID_STATE;
|
2011-05-13 00:27:52 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-01 10:14:33 +08:00
|
|
|
enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
|
2011-06-28 05:57:03 +08:00
|
|
|
struct isci_request *ireq)
|
2011-05-13 00:27:52 +08:00
|
|
|
{
|
|
|
|
enum scis_sds_remote_node_context_states state;
|
|
|
|
|
2011-06-02 08:10:43 +08:00
|
|
|
state = sci_rnc->sm.current_state_id;
|
2011-05-13 00:27:52 +08:00
|
|
|
switch (state) {
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_RESUMING:
|
|
|
|
case SCI_RNC_READY:
|
|
|
|
case SCI_RNC_AWAIT_SUSPENSION:
|
2011-05-13 00:27:52 +08:00
|
|
|
return SCI_SUCCESS;
|
2011-06-02 08:10:43 +08:00
|
|
|
case SCI_RNC_TX_SUSPENDED:
|
|
|
|
case SCI_RNC_TX_RX_SUSPENDED:
|
2011-07-01 10:14:33 +08:00
|
|
|
sci_remote_node_context_resume(sci_rnc, NULL, NULL);
|
2011-05-13 00:27:52 +08:00
|
|
|
return SCI_SUCCESS;
|
|
|
|
default:
|
|
|
|
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
|
2012-02-10 17:05:43 +08:00
|
|
|
"%s: invalid state %s\n", __func__,
|
|
|
|
rnc_state_name(state));
|
2011-05-13 00:27:52 +08:00
|
|
|
return SCI_FAILURE_INVALID_STATE;
|
|
|
|
}
|
|
|
|
}
|